 Kconfig             |   22
 Makefile            |    8
 acpi.c              |  396
 acpi.h              |   76
 ce.h                |   11
 core.c              | 1387
 core.h              |  493
 coredump.c          |   54
 coredump.h          |   81
 dbring.c            |    2
 debug.c             |    8
 debug.h             |    8
 debugfs.c           |  107
 debugfs.h           |   34
 debugfs_htt_stats.c | 4389
 debugfs_htt_stats.h | 1494
 dp.c                |  366
 dp.h                |   98
 dp_mon.c            |  348
 dp_mon.h            |    4
 dp_rx.c             |  988
 dp_rx.h             |   23
 dp_tx.c             |  296
 dp_tx.h             |    7
 fw.c                |  171
 fw.h                |   36
 hal.c               |  430
 hal.h               |   45
 hal_desc.h          |   99
 hal_rx.c            |   29
 hal_rx.h            |   66
 hal_tx.h            |    4
 hif.h               |   45
 htc.c               |   10
 hw.c                |  101
 hw.h                |   72
 mac.c               | 6737
 mac.h               |   52
 mhi.c               |  186
 mhi.h               |    7
 p2p.c               |  146
 p2p.h               |   24
 pci.c               |  574
 pci.h               |   11
 peer.c              |  238
 peer.h              |   31
 qmi.c               |  949
 qmi.h               |   63
 reg.c               |   94
 reg.h               |   10
 rx_desc.h           |  123
 trace.h             |   41
 wmi.c               | 1953
 wmi.h               | 1200
 wow.c               | 1027
 wow.h               |   62
 56 files changed, 22075 insertions(+), 3261 deletions(-)
@@ -2,11 +2,12 @@ config ATH12K
 	tristate "Qualcomm Technologies Wi-Fi 7 support (ath12k)"
 	depends on MAC80211 && HAS_DMA && PCI
-	depends on CRYPTO_MICHAEL_MIC
+	select CRYPTO_MICHAEL_MIC
 	select QCOM_QMI_HELPERS
 	select MHI_BUS
 	select QRTR
 	select QRTR_MHI
+	select PCI_PWRCTL_PWRSEQ if HAVE_PWRCTL
 	help
 	  Enable support for Qualcomm Technologies Wi-Fi 7 (IEEE
 	  802.11be) family of chipsets, for example WCN7850 and
@@ -24,6 +25,15 @@ config ATH12K_DEBUG
 	  If unsure, say Y to make it easier to debug problems. But if
 	  you want optimal performance choose N.
 
+config ATH12K_DEBUGFS
+	bool "QTI ath12k debugfs support"
+	depends on ATH12K && MAC80211_DEBUGFS
+	help
+	  Enable ath12k debugfs support
+
+	  If unsure, say Y to make it easier to debug problems. But if
+	  you want optimal performance choose N.
+
 config ATH12K_TRACING
 	bool "ath12k tracing support"
 	depends on ATH12K && EVENT_TRACING
@@ -32,3 +42,13 @@ config ATH12K_TRACING
 
 	  If unsure, say Y to make it easier to debug problems. But if
 	  you want optimal performance choose N.
+
+config ATH12K_COREDUMP
+	bool "ath12k coredump"
+	depends on ATH12K
+	select WANT_DEV_COREDUMP
+	help
+	  Enable ath12k coredump collection
+
+	  If unsure, say Y to make it easier to debug problems. But if
+	  dump collection not required choose N.
@@ -19,9 +19,15 @@ ath12k-y += core.o \
 	    hw.o \
 	    mhi.o \
 	    pci.o \
-	    dp_mon.o
+	    dp_mon.o \
+	    fw.o \
+	    p2p.o
 
+ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o
+ath12k-$(CONFIG_ACPI) += acpi.o
 ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
+ath12k-$(CONFIG_PM) += wow.o
+ath12k-$(CONFIG_ATH12K_COREDUMP) += coredump.o
 
 # for tracing framework to find trace.h
 CFLAGS_trace.o := -I$(src)
diff --git a/acpi.c b/acpi.c
new file mode 100644
index 000000000000..0555d35aab47
--- /dev/null
+++ b/acpi.c
@@ -0,0 +1,396 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "acpi.h"
+#include "debug.h"
+
+static int ath12k_acpi_dsm_get_data(struct ath12k_base *ab, int func)
+{
+	union acpi_object *obj;
+	acpi_handle root_handle;
+	int ret;
+
+	root_handle = ACPI_HANDLE(ab->dev);
+	if (!root_handle) {
+		ath12k_dbg(ab, ATH12K_DBG_BOOT, "invalid acpi handler\n");
+		return -EOPNOTSUPP;
+	}
+
+	obj = acpi_evaluate_dsm(root_handle, ab->hw_params->acpi_guid, 0, func,
+				NULL);
+
+	if (!obj) {
+		ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi_evaluate_dsm() failed\n");
+		return -ENOENT;
+	}
+
+	if (obj->type == ACPI_TYPE_INTEGER) {
+		ab->acpi.func_bit = obj->integer.value;
+	} else if (obj->type == ACPI_TYPE_BUFFER) {
+		switch (func) {
+		case ATH12K_ACPI_DSM_FUNC_TAS_CFG:
+			if (obj->buffer.length != ATH12K_ACPI_DSM_TAS_CFG_SIZE) {
+				ath12k_warn(ab, "invalid ACPI DSM TAS config size: %d\n",
+					    obj->buffer.length);
+				ret = -EINVAL;
+				goto out;
+			}
+
+			memcpy(&ab->acpi.tas_cfg, obj->buffer.pointer,
+			       obj->buffer.length);
+
+			break;
+		case ATH12K_ACPI_DSM_FUNC_TAS_DATA:
+			if (obj->buffer.length != ATH12K_ACPI_DSM_TAS_DATA_SIZE) {
+				ath12k_warn(ab, "invalid ACPI DSM TAS data size: %d\n",
+					    obj->buffer.length);
+				ret = -EINVAL;
+				goto out;
+			}
+
+			memcpy(&ab->acpi.tas_sar_power_table, obj->buffer.pointer,
+			       obj->buffer.length);
+
+			break;
+		case ATH12K_ACPI_DSM_FUNC_BIOS_SAR:
+			if (obj->buffer.length != ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE) {
+				ath12k_warn(ab, "invalid ACPI BIOS SAR data size: %d\n",
+					    obj->buffer.length);
+				ret = -EINVAL;
+				goto out;
+			}
+
+			memcpy(&ab->acpi.bios_sar_data, obj->buffer.pointer,
+			       obj->buffer.length);
+
+			break;
+		case ATH12K_ACPI_DSM_FUNC_GEO_OFFSET:
+			if (obj->buffer.length != ATH12K_ACPI_DSM_GEO_OFFSET_DATA_SIZE) {
+				ath12k_warn(ab, "invalid ACPI GEO OFFSET data size: %d\n",
+					    obj->buffer.length);
+				ret = -EINVAL;
+				goto out;
+			}
+
+			memcpy(&ab->acpi.geo_offset_data, obj->buffer.pointer,
+			       obj->buffer.length);
+
+			break;
+		case ATH12K_ACPI_DSM_FUNC_INDEX_CCA:
+			if (obj->buffer.length != ATH12K_ACPI_DSM_CCA_DATA_SIZE) {
+				ath12k_warn(ab, "invalid ACPI DSM CCA data size: %d\n",
+					    obj->buffer.length);
+				ret = -EINVAL;
+				goto out;
+			}
+
+			memcpy(&ab->acpi.cca_data, obj->buffer.pointer,
+			       obj->buffer.length);
+
+			break;
+		case ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE:
+			if (obj->buffer.length != ATH12K_ACPI_DSM_BAND_EDGE_DATA_SIZE) {
+				ath12k_warn(ab, "invalid ACPI DSM band edge data size: %d\n",
+					    obj->buffer.length);
+				ret = -EINVAL;
+				goto out;
+			}
+
+			memcpy(&ab->acpi.band_edge_power, obj->buffer.pointer,
+			       obj->buffer.length);
+
+			break;
+		}
+	} else {
+		ath12k_warn(ab, "ACPI DSM method returned an unsupported object type: %d\n",
+			    obj->type);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = 0;
+
+out:
+	ACPI_FREE(obj);
+	return ret;
+}
+
+static int ath12k_acpi_set_power_limit(struct ath12k_base *ab)
+{
+	const u8 *tas_sar_power_table = ab->acpi.tas_sar_power_table;
+	int ret;
+
+	if (tas_sar_power_table[0] != ATH12K_ACPI_TAS_DATA_VERSION ||
+	    tas_sar_power_table[1] != ATH12K_ACPI_TAS_DATA_ENABLE) {
+		ath12k_warn(ab, "latest ACPI TAS data is invalid\n");
+		return -EINVAL;
+	}
+
+	ret = ath12k_wmi_set_bios_cmd(ab, WMI_BIOS_PARAM_TAS_DATA_TYPE,
+				      tas_sar_power_table,
+				      ATH12K_ACPI_DSM_TAS_DATA_SIZE);
+	if (ret) {
+		ath12k_warn(ab, "failed to send ACPI TAS data table: %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int ath12k_acpi_set_bios_sar_power(struct ath12k_base *ab)
+{
+	int ret;
+
+	if (ab->acpi.bios_sar_data[0] != ATH12K_ACPI_POWER_LIMIT_VERSION ||
+	    ab->acpi.bios_sar_data[1] != ATH12K_ACPI_POWER_LIMIT_ENABLE_FLAG) {
+		ath12k_warn(ab, "invalid latest ACPI BIOS SAR data\n");
+		return -EINVAL;
+	}
+
+	ret = ath12k_wmi_set_bios_sar_cmd(ab, ab->acpi.bios_sar_data);
+	if (ret) {
+		ath12k_warn(ab, "failed to set ACPI BIOS SAR table: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void ath12k_acpi_dsm_notify(acpi_handle handle, u32 event, void *data)
+{
+	int ret;
+	struct ath12k_base *ab = data;
+
+	if (event != ATH12K_ACPI_NOTIFY_EVENT) {
+		ath12k_warn(ab, "unknown acpi notify %u\n", event);
+		return;
+	}
+
+	if (!ab->acpi.acpi_tas_enable) {
+		ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi_tas_enable is false\n");
+		return;
+	}
+
+	ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_TAS_DATA);
+	if (ret) {
+		ath12k_warn(ab, "failed to update ACPI TAS data table: %d\n", ret);
+		return;
+	}
+
+	ret = ath12k_acpi_set_power_limit(ab);
+	if (ret) {
+		ath12k_warn(ab, "failed to set ACPI TAS power limit data: %d", ret);
+		return;
+	}
+
+	if (!ab->acpi.acpi_bios_sar_enable)
+		return;
+
+	ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_BIOS_SAR);
+	if (ret) {
+		ath12k_warn(ab, "failed to update BIOS SAR: %d\n", ret);
+		return;
+	}
+
+	ret = ath12k_acpi_set_bios_sar_power(ab);
+	if (ret) {
+		ath12k_warn(ab, "failed to set BIOS SAR power limit: %d\n", ret);
+		return;
+	}
+}
+
+static int ath12k_acpi_set_bios_sar_params(struct ath12k_base *ab)
+{
+	int ret;
+
+	ret = ath12k_wmi_set_bios_sar_cmd(ab, ab->acpi.bios_sar_data);
+	if (ret) {
+		ath12k_warn(ab, "failed to set ACPI BIOS SAR table: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath12k_wmi_set_bios_geo_cmd(ab, ab->acpi.geo_offset_data);
+	if (ret) {
+		ath12k_warn(ab, "failed to set ACPI BIOS GEO table: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ath12k_acpi_set_tas_params(struct ath12k_base *ab)
+{
+	int ret;
+
+	ret = ath12k_wmi_set_bios_cmd(ab, WMI_BIOS_PARAM_TAS_CONFIG_TYPE,
+				      ab->acpi.tas_cfg,
+				      ATH12K_ACPI_DSM_TAS_CFG_SIZE);
+	if (ret) {
+		ath12k_warn(ab, "failed to send ACPI TAS config table parameter: %d\n",
+			    ret);
+		return ret;
+	}
+
+	ret = ath12k_wmi_set_bios_cmd(ab, WMI_BIOS_PARAM_TAS_DATA_TYPE,
+				      ab->acpi.tas_sar_power_table,
+				      ATH12K_ACPI_DSM_TAS_DATA_SIZE);
+	if (ret) {
+		ath12k_warn(ab, "failed to send ACPI TAS data table parameter: %d\n",
+			    ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+int ath12k_acpi_start(struct ath12k_base *ab)
+{
+	acpi_status status;
+	u8 *buf;
+	int ret;
+
+	if (!ab->hw_params->acpi_guid)
+		/* not supported with this hardware */
+		return 0;
+
+	ab->acpi.acpi_tas_enable = false;
+
+	ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS);
+	if (ret) {
+		ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to get ACPI DSM data: %d\n", ret);
+		return ret;
+	}
+
+	if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_CFG)) {
+		ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_TAS_CFG);
+		if (ret) {
+			ath12k_warn(ab, "failed to get ACPI TAS config table: %d\n", ret);
+			return ret;
+		}
+	}
+
+	if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_DATA)) {
+		ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_TAS_DATA);
+		if (ret) {
+			ath12k_warn(ab, "failed to get ACPI TAS data table: %d\n", ret);
+			return ret;
+		}
+
+		if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_CFG) &&
+		    ab->acpi.tas_sar_power_table[0] == ATH12K_ACPI_TAS_DATA_VERSION &&
+		    ab->acpi.tas_sar_power_table[1] == ATH12K_ACPI_TAS_DATA_ENABLE)
+			ab->acpi.acpi_tas_enable = true;
+	}
+
+	if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_BIOS_SAR)) {
+		ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_BIOS_SAR);
+		if (ret) {
+			ath12k_warn(ab, "failed to get ACPI bios sar data: %d\n", ret);
+			return ret;
+		}
+	}
+
+	if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_GEO_OFFSET)) {
+		ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_GEO_OFFSET);
+		if (ret) {
+			ath12k_warn(ab, "failed to get ACPI geo offset data: %d\n", ret);
+			return ret;
+		}
+
+		if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_BIOS_SAR) &&
+		    ab->acpi.bios_sar_data[0] == ATH12K_ACPI_POWER_LIMIT_VERSION &&
+		    ab->acpi.bios_sar_data[1] == ATH12K_ACPI_POWER_LIMIT_ENABLE_FLAG &&
+		    !ab->acpi.acpi_tas_enable)
+			ab->acpi.acpi_bios_sar_enable = true;
+	}
+
+	if (ab->acpi.acpi_tas_enable) {
+		ret = ath12k_acpi_set_tas_params(ab);
+		if (ret) {
+			ath12k_warn(ab, "failed to send ACPI parameters: %d\n", ret);
+			return ret;
+		}
+	}
+
+	if (ab->acpi.acpi_bios_sar_enable) {
+		ret = ath12k_acpi_set_bios_sar_params(ab);
+		if (ret)
+			return ret;
+	}
+
+	if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_CCA)) {
+		ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_INDEX_CCA);
+		if (ret) {
+			ath12k_warn(ab, "failed to get ACPI DSM CCA threshold configuration: %d\n",
+				    ret);
+			return ret;
+		}
+
+		if (ab->acpi.cca_data[0] == ATH12K_ACPI_CCA_THR_VERSION &&
+		    ab->acpi.cca_data[ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET] ==
+		    ATH12K_ACPI_CCA_THR_ENABLE_FLAG) {
+			buf = ab->acpi.cca_data + ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET;
+			ret = ath12k_wmi_set_bios_cmd(ab,
+						      WMI_BIOS_PARAM_CCA_THRESHOLD_TYPE,
+						      buf,
+						      ATH12K_ACPI_CCA_THR_OFFSET_LEN);
+			if (ret) {
+				ath12k_warn(ab, "failed to set ACPI DSM CCA threshold: %d\n",
+					    ret);
+				return ret;
+			}
+		}
+	}
+
+	if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi,
+				       ATH12K_ACPI_FUNC_BIT_BAND_EDGE_CHAN_POWER)) {
+		ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE);
+		if (ret) {
+			ath12k_warn(ab, "failed to get ACPI DSM band edge channel power: %d\n",
+				    ret);
+			return ret;
+		}
+
+		if (ab->acpi.band_edge_power[0] == ATH12K_ACPI_BAND_EDGE_VERSION &&
+		    ab->acpi.band_edge_power[1] == ATH12K_ACPI_BAND_EDGE_ENABLE_FLAG) {
+			ret = ath12k_wmi_set_bios_cmd(ab,
+						      WMI_BIOS_PARAM_TYPE_BANDEDGE,
+						      ab->acpi.band_edge_power,
+						      sizeof(ab->acpi.band_edge_power));
+			if (ret) {
+				ath12k_warn(ab,
+					    "failed to set ACPI DSM band edge channel power: %d\n",
+					    ret);
+				return ret;
+			}
+		}
+	}
+
+	status = acpi_install_notify_handler(ACPI_HANDLE(ab->dev),
+					     ACPI_DEVICE_NOTIFY,
+					     ath12k_acpi_dsm_notify, ab);
+	if (ACPI_FAILURE(status)) {
+		ath12k_warn(ab, "failed to install DSM notify callback: %d\n", status);
+		return -EIO;
+	}
+
+	ab->acpi.started = true;
+
+	return 0;
+}
+
+void ath12k_acpi_stop(struct ath12k_base *ab)
+{
+	if (!ab->acpi.started)
+		return;
+
+	acpi_remove_notify_handler(ACPI_HANDLE(ab->dev),
+				   ACPI_DEVICE_NOTIFY,
+				   ath12k_acpi_dsm_notify);
+
+	memset(&ab->acpi, 0, sizeof(ab->acpi));
+}
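
The probe path in ath12k_acpi_start() leans on a common _DSM convention: function 0 returns an integer bitmap, and support for function N is advertised in bit N-1, which is exactly what ATH12K_ACPI_FUNC_BIT_VALID() tests. A minimal user-space sketch of that gating convention follows; the supported_funcs value is a hypothetical DSM reply, not real firmware data.

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

/* Same numbering as the driver: DSM function 0 reports support for
 * function N via bit N-1 of the returned integer.
 */
#define DSM_FUNC_BIOS_SAR 4
#define DSM_FUNC_TAS_CFG  8

static int dsm_func_supported(uint32_t func_bitmap, int func)
{
    /* mirrors ATH12K_ACPI_FUNC_BIT_VALID(), where e.g.
     * ATH12K_ACPI_FUNC_BIT_BIOS_SAR is defined as BIT(3)
     */
    return !!(func_bitmap & BIT(func - 1));
}

int main(void)
{
    uint32_t supported_funcs = BIT(3); /* hypothetical: only BIOS SAR */

    printf("BIOS SAR supported: %d\n",
           dsm_func_supported(supported_funcs, DSM_FUNC_BIOS_SAR)); /* 1 */
    printf("TAS cfg supported:  %d\n",
           dsm_func_supported(supported_funcs, DSM_FUNC_TAS_CFG));  /* 0 */
    return 0;
}
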
diff --git a/acpi.h b/acpi.h
new file mode 100644
index 000000000000..39e003d86a48
--- /dev/null
+++ b/acpi.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef ATH12K_ACPI_H
+#define ATH12K_ACPI_H
+
+#include <linux/acpi.h>
+
+#define ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS	0
+#define ATH12K_ACPI_DSM_FUNC_BIOS_SAR		4
+#define ATH12K_ACPI_DSM_FUNC_GEO_OFFSET		5
+#define ATH12K_ACPI_DSM_FUNC_INDEX_CCA		6
+#define ATH12K_ACPI_DSM_FUNC_TAS_CFG		8
+#define ATH12K_ACPI_DSM_FUNC_TAS_DATA		9
+#define ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE	10
+
+#define ATH12K_ACPI_FUNC_BIT_BIOS_SAR			BIT(3)
+#define ATH12K_ACPI_FUNC_BIT_GEO_OFFSET			BIT(4)
+#define ATH12K_ACPI_FUNC_BIT_CCA			BIT(5)
+#define ATH12K_ACPI_FUNC_BIT_TAS_CFG			BIT(7)
+#define ATH12K_ACPI_FUNC_BIT_TAS_DATA			BIT(8)
+#define ATH12K_ACPI_FUNC_BIT_BAND_EDGE_CHAN_POWER	BIT(9)
+
+#define ATH12K_ACPI_NOTIFY_EVENT	0x86
+#define ATH12K_ACPI_FUNC_BIT_VALID(_acdata, _func)	(((_acdata).func_bit) & (_func))
+
+#define ATH12K_ACPI_TAS_DATA_VERSION		0x1
+#define ATH12K_ACPI_TAS_DATA_ENABLE		0x1
+#define ATH12K_ACPI_POWER_LIMIT_VERSION		0x1
+#define ATH12K_ACPI_POWER_LIMIT_ENABLE_FLAG	0x1
+#define ATH12K_ACPI_CCA_THR_VERSION		0x1
+#define ATH12K_ACPI_CCA_THR_ENABLE_FLAG		0x1
+#define ATH12K_ACPI_BAND_EDGE_VERSION		0x1
+#define ATH12K_ACPI_BAND_EDGE_ENABLE_FLAG	0x1
+
+#define ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET	1
+#define ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET	2
+#define ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET	5
+#define ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN	10
+#define ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET	12
+#define ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN	18
+#define ATH12K_ACPI_BIOS_SAR_TABLE_LEN		22
+#define ATH12K_ACPI_CCA_THR_OFFSET_LEN		36
+
+#define ATH12K_ACPI_DSM_TAS_DATA_SIZE		69
+#define ATH12K_ACPI_DSM_BAND_EDGE_DATA_SIZE	100
+#define ATH12K_ACPI_DSM_TAS_CFG_SIZE		108
+
+#define ATH12K_ACPI_DSM_GEO_OFFSET_DATA_SIZE	(ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET + \
+						 ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN)
+#define ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE	(ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET + \
+						 ATH12K_ACPI_BIOS_SAR_TABLE_LEN)
+#define ATH12K_ACPI_DSM_CCA_DATA_SIZE		(ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET + \
+						 ATH12K_ACPI_CCA_THR_OFFSET_LEN)
+
+#ifdef CONFIG_ACPI
+
+int ath12k_acpi_start(struct ath12k_base *ab);
+void ath12k_acpi_stop(struct ath12k_base *ab);
+
+#else
+
+static inline int ath12k_acpi_start(struct ath12k_base *ab)
+{
+	return 0;
+}
+
+static inline void ath12k_acpi_stop(struct ath12k_base *ab)
+{
+}
+
+#endif /* CONFIG_ACPI */
+
+#endif /* ATH12K_ACPI_H */
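
The #ifdef CONFIG_ACPI block at the bottom of acpi.h pairs with the Makefile's conditional `ath12k-$(CONFIG_ACPI) += acpi.o`: when the object is not built, static inline no-ops keep every caller compiling without its own ifdefs. A stripped-down sketch of the same idiom, with hypothetical names (CONFIG_MY_FEATURE, my_feature_start and friends are made up for illustration):

/* feature.h - conditional-stub idiom, as used by acpi.h */
struct my_dev;

#ifdef CONFIG_MY_FEATURE
int my_feature_start(struct my_dev *dev);	/* real version lives in feature.c */
void my_feature_stop(struct my_dev *dev);
#else
static inline int my_feature_start(struct my_dev *dev)
{
	return 0;	/* report success so the caller's probe path continues */
}

static inline void my_feature_stop(struct my_dev *dev)
{
}
#endif
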
diff --git a/ce.h b/ce.h
--- a/ce.h
+++ b/ce.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef ATH12K_CE_H
@@ -119,7 +119,7 @@ struct ath12k_ce_ring {
 	/* Host address space */
 	void *base_addr_owner_space_unaligned;
 	/* CE address space */
-	u32 base_addr_ce_space_unaligned;
+	dma_addr_t base_addr_ce_space_unaligned;
 
 	/* Actual start of descriptors.
 	 * Aligned to descriptor-size boundary.
@@ -129,7 +129,7 @@ struct ath12k_ce_ring {
 	void *base_addr_owner_space;
 
 	/* CE address space */
-	u32 base_addr_ce_space;
+	dma_addr_t base_addr_ce_space;
 
 	/* HAL ring id */
 	u32 hal_ring_id;
@@ -148,7 +148,7 @@ struct ath12k_ce_pipe {
 	void (*send_cb)(struct ath12k_ce_pipe *pipe);
 	void (*recv_cb)(struct ath12k_base *ab, struct sk_buff *skb);
 
-	struct tasklet_struct intr_tq;
+	struct work_struct intr_wq;
 	struct ath12k_ce_ring *src_ring;
 	struct ath12k_ce_ring *dest_ring;
 	struct ath12k_ce_ring *status_ring;
@@ -176,9 +176,6 @@ int ath12k_ce_alloc_pipes(struct ath12k_base *ab);
 void ath12k_ce_free_pipes(struct ath12k_base *ab);
 int ath12k_ce_get_attr_flags(struct ath12k_base *ab, int ce_id);
 void ath12k_ce_poll_send_completed(struct ath12k_base *ab, u8 pipe_id);
-int ath12k_ce_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
-				  u8 *ul_pipe, u8 *dl_pipe);
-int ath12k_ce_attr_attach(struct ath12k_base *ab);
 void ath12k_ce_get_shadow_config(struct ath12k_base *ab,
 				 u32 **shadow_cfg, u32 *shadow_cfg_len);
 #endif
diff --git a/core.c b/core.c
--- a/core.c
+++ b/core.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -9,101 +9,228 @@
 #include <linux/remoteproc.h>
 #include <linux/firmware.h>
 #include <linux/of.h>
+#include <linux/of_graph.h>
 
 #include "core.h"
 #include "dp_tx.h"
 #include "dp_rx.h"
 #include "debug.h"
 #include "hif.h"
+#include "fw.h"
+#include "debugfs.h"
+#include "wow.h"
 
 unsigned int ath12k_debug_mask;
 module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
 MODULE_PARM_DESC(debug_mask, "Debugging mask");
 
-int ath12k_core_suspend(struct ath12k_base *ab)
+/* protected with ath12k_hw_group_mutex */
+static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);
+
+static DEFINE_MUTEX(ath12k_hw_group_mutex);
+
+static int ath12k_core_rfkill_config(struct ath12k_base *ab)
 {
-	int ret;
+	struct ath12k *ar;
+	int ret = 0, i;
+
+	if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
+		return 0;
+
+	for (i = 0; i < ab->num_radios; i++) {
+		ar = ab->pdevs[i].ar;
+
+		ret = ath12k_mac_rfkill_config(ar);
+		if (ret && ret != -EOPNOTSUPP) {
+			ath12k_warn(ab, "failed to configure rfkill: %d", ret);
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+/* Check if we need to continue with suspend/resume operation.
+ * Return:
+ *	a negative value: error happens and don't continue.
+ *	0:  no error but don't continue.
+ *	positive value: no error and do continue.
+ */
+static int ath12k_core_continue_suspend_resume(struct ath12k_base *ab)
+{
+	struct ath12k *ar;
 
 	if (!ab->hw_params->supports_suspend)
 		return -EOPNOTSUPP;
 
-	/* TODO: there can frames in queues so for now add delay as a hack.
-	 * Need to implement to handle and remove this delay.
+	/* so far single_pdev_only chips have supports_suspend as true
+	 * so pass 0 as a dummy pdev_id here.
 	 */
-	msleep(500);
+	ar = ab->pdevs[0].ar;
+	if (!ar || !ar->ah || ar->ah->state != ATH12K_HW_STATE_OFF)
+		return 0;
 
-	ret = ath12k_dp_rx_pktlog_stop(ab, true);
-	if (ret) {
-		ath12k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n",
-			    ret);
+	return 1;
+}
+
+int ath12k_core_suspend(struct ath12k_base *ab)
+{
+	struct ath12k *ar;
+	int ret, i;
+
+	ret = ath12k_core_continue_suspend_resume(ab);
+	if (ret <= 0)
 		return ret;
+
+	for (i = 0; i < ab->num_radios; i++) {
+		ar = ab->pdevs[i].ar;
+		if (!ar)
+			continue;
+
+		wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
+
+		ret = ath12k_mac_wait_tx_complete(ar);
+		if (ret) {
+			wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
+			ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
+			return ret;
+		}
+
+		wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
 	}
 
-	ret = ath12k_dp_rx_pktlog_stop(ab, false);
-	if (ret) {
-		ath12k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n",
-			    ret);
+	/* PM framework skips suspend_late/resume_early callbacks
+	 * if other devices report errors in their suspend callbacks.
+	 * However ath12k_core_resume() would still be called because
+	 * here we return success thus kernel put us on dpm_suspended_list.
+	 * Since we won't go through a power down/up cycle, there is
+	 * no chance to call complete(&ab->restart_completed) in
+	 * ath12k_core_restart(), making ath12k_core_resume() timeout.
+	 * So call it here to avoid this issue. This also works in case
+	 * no error happens thus suspend_late/resume_early get called,
+	 * because it will be reinitialized in ath12k_core_resume_early().
+	 */
+	complete(&ab->restart_completed);
+
+	return 0;
+}
+EXPORT_SYMBOL(ath12k_core_suspend);
+
+int ath12k_core_suspend_late(struct ath12k_base *ab)
+{
+	int ret;
+
+	ret = ath12k_core_continue_suspend_resume(ab);
+	if (ret <= 0)
 		return ret;
-	}
+
+	ath12k_acpi_stop(ab);
 
 	ath12k_hif_irq_disable(ab);
 	ath12k_hif_ce_irq_disable(ab);
 
-	ret = ath12k_hif_suspend(ab);
-	if (ret) {
-		ath12k_warn(ab, "failed to suspend hif: %d\n", ret);
-		return ret;
-	}
+	ath12k_hif_power_down(ab, true);
 
 	return 0;
 }
+EXPORT_SYMBOL(ath12k_core_suspend_late);
 
-int ath12k_core_resume(struct ath12k_base *ab)
+int ath12k_core_resume_early(struct ath12k_base *ab)
 {
 	int ret;
 
-	if (!ab->hw_params->supports_suspend)
-		return -EOPNOTSUPP;
-
-	ret = ath12k_hif_resume(ab);
-	if (ret) {
-		ath12k_warn(ab, "failed to resume hif during resume: %d\n", ret);
+	ret = ath12k_core_continue_suspend_resume(ab);
+	if (ret <= 0)
 		return ret;
-	}
 
-	ath12k_hif_ce_irq_enable(ab);
-	ath12k_hif_irq_enable(ab);
+	reinit_completion(&ab->restart_completed);
+	ret = ath12k_hif_power_up(ab);
+	if (ret)
+		ath12k_warn(ab, "failed to power up hif during resume: %d\n", ret);
 
-	ret = ath12k_dp_rx_pktlog_start(ab);
-	if (ret) {
-		ath12k_warn(ab, "failed to start rx pktlog during resume: %d\n",
-			    ret);
+	return ret;
+}
+EXPORT_SYMBOL(ath12k_core_resume_early);
+
+int ath12k_core_resume(struct ath12k_base *ab)
+{
+	long time_left;
+	int ret;
+
+	ret = ath12k_core_continue_suspend_resume(ab);
+	if (ret <= 0)
 		return ret;
+
+	time_left = wait_for_completion_timeout(&ab->restart_completed,
+						ATH12K_RESET_TIMEOUT_HZ);
+	if (time_left == 0) {
+		ath12k_warn(ab, "timeout while waiting for restart complete");
+		return -ETIMEDOUT;
 	}
 
 	return 0;
 }
+EXPORT_SYMBOL(ath12k_core_resume);
 
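
The new ath12k_core_continue_suspend_resume() folds three outcomes into one return value (negative for a real error, zero for "nothing to do", positive for "go ahead"), and ath12k_core_suspend(), ath12k_core_suspend_late(), ath12k_core_resume_early() and ath12k_core_resume() all open with the same `if (ret <= 0) return ret;` guard. A compilable toy model of that caller pattern, with hypothetical names:

#include <stdio.h>

/* stand-in for ath12k_core_continue_suspend_resume():
 * negative = fail, 0 = skip silently, positive = continue
 */
static int pm_should_continue(int hw_supports_suspend, int radio_in_use)
{
    if (!hw_supports_suspend)
        return -95;	/* illustrative -EOPNOTSUPP */
    if (radio_in_use)
        return 0;	/* no error, but nothing to do */
    return 1;
}

static int do_suspend(int hw_supports_suspend, int radio_in_use)
{
    int ret = pm_should_continue(hw_supports_suspend, radio_in_use);

    if (ret <= 0)	/* same guard every PM entry point uses */
        return ret;

    printf("suspending\n");
    return 0;
}

int main(void)
{
    printf("%d\n", do_suspend(1, 0));	/* proceeds, prints "suspending" */
    printf("%d\n", do_suspend(1, 1));	/* skipped, returns 0 */
    printf("%d\n", do_suspend(0, 0));	/* unsupported, returns -95 */
    return 0;
}
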
-static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
-					 size_t name_len)
+static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
+					   size_t name_len, bool with_variant,
+					   bool bus_type_mode)
 {
 	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
 	char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
 
-	if (ab->qmi.target.bdf_ext[0] != '\0')
+	if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
 		scnprintf(variant, sizeof(variant), ",variant=%s",
 			  ab->qmi.target.bdf_ext);
 
-	scnprintf(name, name_len,
-		  "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
-		  ath12k_bus_str(ab->hif.bus),
-		  ab->qmi.target.chip_id,
-		  ab->qmi.target.board_id, variant);
+	switch (ab->id.bdf_search) {
+	case ATH12K_BDF_SEARCH_BUS_AND_BOARD:
+		if (bus_type_mode)
+			scnprintf(name, name_len,
+				  "bus=%s",
+				  ath12k_bus_str(ab->hif.bus));
+		else
+			scnprintf(name, name_len,
+				  "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
+				  ath12k_bus_str(ab->hif.bus),
+				  ab->id.vendor, ab->id.device,
+				  ab->id.subsystem_vendor,
+				  ab->id.subsystem_device,
+				  ab->qmi.target.chip_id,
+				  ab->qmi.target.board_id,
+				  variant);
+		break;
+	default:
+		scnprintf(name, name_len,
+			  "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
+			  ath12k_bus_str(ab->hif.bus),
+			  ab->qmi.target.chip_id,
+			  ab->qmi.target.board_id, variant);
+		break;
+	}
 
 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name);
 
 	return 0;
 }
 
+static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
+					 size_t name_len)
+{
+	return __ath12k_core_create_board_name(ab, name, name_len, true, false);
+}
+
+static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
+						  size_t name_len)
+{
+	return __ath12k_core_create_board_name(ab, name, name_len, false, false);
+}
+
+static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
+						  size_t name_len)
+{
+	return __ath12k_core_create_board_name(ab, name, name_len, false, true);
+}
+
 const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
 						    const char *file)
 {
@@ -138,7 +265,9 @@ static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
 					 struct ath12k_board_data *bd,
 					 const void *buf, size_t buf_len,
 					 const char *boardname,
-					 int bd_ie_type)
+					 int ie_id,
+					 int name_id,
+					 int data_id)
 {
 	const struct ath12k_fw_ie *hdr;
 	bool name_match_found;
@@ -148,7 +277,7 @@ static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
 
 	name_match_found = false;
 
-	/* go through ATH12K_BD_IE_BOARD_ elements */
+	/* go through ATH12K_BD_IE_BOARD_/ATH12K_BD_IE_REGDB_ elements */
 	while (buf_len > sizeof(struct ath12k_fw_ie)) {
 		hdr = buf;
 		board_ie_id = le32_to_cpu(hdr->id);
@@ -159,48 +288,50 @@ static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
 		buf += sizeof(*hdr);
 
 		if (buf_len < ALIGN(board_ie_len, 4)) {
-			ath12k_err(ab, "invalid ATH12K_BD_IE_BOARD length: %zu < %zu\n",
+			ath12k_err(ab, "invalid %s length: %zu < %zu\n",
+				   ath12k_bd_ie_type_str(ie_id),
 				   buf_len, ALIGN(board_ie_len, 4));
 			ret = -EINVAL;
 			goto out;
 		}
 
-		switch (board_ie_id) {
-		case ATH12K_BD_IE_BOARD_NAME:
+		if (board_ie_id == name_id) {
 			ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "",
 					board_ie_data, board_ie_len);
 
 			if (board_ie_len != strlen(boardname))
-				break;
+				goto next;
 
 			ret = memcmp(board_ie_data, boardname, strlen(boardname));
 			if (ret)
-				break;
+				goto next;
 
 			name_match_found = true;
 			ath12k_dbg(ab, ATH12K_DBG_BOOT,
-				   "boot found match for name '%s'",
+				   "boot found match %s for name '%s'",
+				   ath12k_bd_ie_type_str(ie_id),
 				   boardname);
-			break;
-		case ATH12K_BD_IE_BOARD_DATA:
+		} else if (board_ie_id == data_id) {
 			if (!name_match_found)
 				/* no match found */
-				break;
+				goto next;
 
 			ath12k_dbg(ab, ATH12K_DBG_BOOT,
-				   "boot found board data for '%s'", boardname);
+				   "boot found %s for '%s'",
+				   ath12k_bd_ie_type_str(ie_id),
+				   boardname);
 
 			bd->data = board_ie_data;
 			bd->len = board_ie_len;
 
 			ret = 0;
 			goto out;
-		default:
-			ath12k_warn(ab, "unknown ATH12K_BD_IE_BOARD found: %d\n",
+		} else {
+			ath12k_warn(ab, "unknown %s id found: %d\n",
+				    ath12k_bd_ie_type_str(ie_id),
 				    board_ie_id);
-			break;
 		}
-
+next:
 		/* jump over the padding */
 		board_ie_len = ALIGN(board_ie_len, 4);
 
@@ -217,7 +348,10 @@ out:
 
 static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
 					      struct ath12k_board_data *bd,
-					      const char *boardname)
+					      const char *boardname,
+					      int ie_id_match,
+					      int name_id,
+					      int data_id)
 {
 	size_t len, magic_len;
 	const u8 *data;
@@ -282,22 +416,23 @@ static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
 			goto err;
 		}
 
-		switch (ie_id) {
-		case ATH12K_BD_IE_BOARD:
+		if (ie_id == ie_id_match) {
 			ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
 							    ie_len,
 							    boardname,
-							    ATH12K_BD_IE_BOARD);
+							    ie_id_match,
+							    name_id,
+							    data_id);
 			if (ret == -ENOENT)
 				/* no match found, continue */
-				break;
+				goto next;
 			else if (ret)
 				/* there was an error, bail out */
 				goto err;
 			/* either found or error, so stop searching */
 			goto out;
 		}
-
+next:
 		/* jump over the padding */
 		ie_len = ALIGN(ie_len, 4);
 
@@ -307,8 +442,9 @@ static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
 
 out:
 	if (!bd->data || !bd->len) {
-		ath12k_err(ab,
-			   "failed to fetch board data for %s from %s\n",
+		ath12k_dbg(ab, ATH12K_DBG_BOOT,
+			   "failed to fetch %s for %s from %s\n",
+			   ath12k_bd_ie_type_str(ie_id_match),
 			   boardname, filepath);
 		ret = -ENODATA;
 		goto err;
@@ -335,48 +471,224 @@ int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
 	return 0;
 }
 
-#define BOARD_NAME_SIZE 100
+#define BOARD_NAME_SIZE 200
 int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
 {
-	char boardname[BOARD_NAME_SIZE];
+	char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
+	char *filename, filepath[100];
+	int bd_api;
 	int ret;
 
-	ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
+	filename = ATH12K_BOARD_API2_FILE;
+
+	ret = ath12k_core_create_board_name(ab, boardname, sizeof(boardname));
 	if (ret) {
 		ath12k_err(ab, "failed to create board name: %d", ret);
 		return ret;
 	}
 
-	ab->bd_api = 2;
-	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname);
+	bd_api = 2;
+	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
+						 ATH12K_BD_IE_BOARD,
+						 ATH12K_BD_IE_BOARD_NAME,
+						 ATH12K_BD_IE_BOARD_DATA);
+	if (!ret)
+		goto success;
+
+	ret = ath12k_core_create_fallback_board_name(ab, fallback_boardname,
+						     sizeof(fallback_boardname));
+	if (ret) {
+		ath12k_err(ab, "failed to create fallback board name: %d", ret);
+		return ret;
+	}
+
+	ret = ath12k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
+						 ATH12K_BD_IE_BOARD,
+						 ATH12K_BD_IE_BOARD_NAME,
+						 ATH12K_BD_IE_BOARD_DATA);
 	if (!ret)
 		goto success;
 
-	ab->bd_api = 1;
+	bd_api = 1;
 	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
 	if (ret) {
-		ath12k_err(ab, "failed to fetch board-2.bin or board.bin from %s\n",
+		ath12k_core_create_firmware_path(ab, filename,
+						 filepath, sizeof(filepath));
+		ath12k_err(ab, "failed to fetch board data for %s from %s\n",
+			   boardname, filepath);
+		if (memcmp(boardname, fallback_boardname, strlen(boardname)))
+			ath12k_err(ab, "failed to fetch board data for %s from %s\n",
+				   fallback_boardname, filepath);
+
+		ath12k_err(ab, "failed to fetch board.bin from %s\n",
 			   ab->hw_params->fw.dir);
 		return ret;
 	}
 
 success:
-	ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", ab->bd_api);
+	ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", bd_api);
 	return 0;
 }
 
+int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd)
+{
+	char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
+	int ret;
+
+	ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
+	if (ret) {
+		ath12k_dbg(ab, ATH12K_DBG_BOOT,
+			   "failed to create board name for regdb: %d", ret);
+		goto exit;
+	}
+
+	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
+						 ATH12K_BD_IE_REGDB,
+						 ATH12K_BD_IE_REGDB_NAME,
+						 ATH12K_BD_IE_REGDB_DATA);
+	if (!ret)
+		goto exit;
+
+	ret = ath12k_core_create_bus_type_board_name(ab, default_boardname,
+						     BOARD_NAME_SIZE);
+	if (ret) {
+		ath12k_dbg(ab, ATH12K_DBG_BOOT,
+			   "failed to create default board name for regdb: %d", ret);
+		goto exit;
+	}
+
+	ret = ath12k_core_fetch_board_data_api_n(ab, bd, default_boardname,
+						 ATH12K_BD_IE_REGDB,
+						 ATH12K_BD_IE_REGDB_NAME,
+						 ATH12K_BD_IE_REGDB_DATA);
+	if (!ret)
+		goto exit;
+
+	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_REGDB_FILE_NAME);
+	if (ret)
+		ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to fetch %s from %s\n",
+			   ATH12K_REGDB_FILE_NAME, ab->hw_params->fw.dir);
+
+exit:
+	if (!ret)
+		ath12k_dbg(ab, ATH12K_DBG_BOOT, "fetched regdb\n");
+
+	return ret;
+}
+
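
ath12k_core_fetch_bdf() above now walks three candidates in order: the fully qualified board name, a fallback name without the ,variant= suffix, and finally the API-1 board.bin. A small sketch of that search order; the name strings and helper are made up for illustration:

#include <stdio.h>
#include <string.h>

/* hypothetical stand-in for the board-2.bin name lookup */
static int fetch_by_name(const char *want, const char *present)
{
    return strcmp(want, present) == 0 ? 0 : -2;	/* illustrative -ENOENT */
}

/* mirrors the fallback chain in ath12k_core_fetch_bdf() */
static int fetch_bdf(const char *present)
{
    const char *full = "bus=pci,vendor=17cb,device=1107,qmi-board-id=255,variant=XYZ";
    const char *fallback = "bus=pci,vendor=17cb,device=1107,qmi-board-id=255";

    if (!fetch_by_name(full, present))
        return 0;	/* board-2.bin, exact match with variant */
    if (!fetch_by_name(fallback, present))
        return 0;	/* board-2.bin, variant dropped */
    printf("falling back to board.bin (API 1)\n");
    return 0;
}

int main(void)
{
    return fetch_bdf("bus=pci,vendor=17cb,device=1107,qmi-board-id=255");
}
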
+u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab)
+{
+	if (ab->num_radios == 2)
+		return TARGET_NUM_STATIONS_DBS;
+	else if (ab->num_radios == 3)
+		return TARGET_NUM_PEERS_PDEV_DBS_SBS;
+	return TARGET_NUM_STATIONS_SINGLE;
+}
+
+u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab)
+{
+	if (ab->num_radios == 2)
+		return TARGET_NUM_PEERS_PDEV_DBS;
+	else if (ab->num_radios == 3)
+		return TARGET_NUM_PEERS_PDEV_DBS_SBS;
+	return TARGET_NUM_PEERS_PDEV_SINGLE;
+}
+
+u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab)
+{
+	if (ab->num_radios == 2)
+		return TARGET_NUM_TIDS(DBS);
+	else if (ab->num_radios == 3)
+		return TARGET_NUM_TIDS(DBS_SBS);
+	return TARGET_NUM_TIDS(SINGLE);
+}
+
 static void ath12k_core_stop(struct ath12k_base *ab)
 {
+	ath12k_core_stopped(ab);
+
 	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
 		ath12k_qmi_firmware_stop(ab);
 
+	ath12k_acpi_stop(ab);
+
+	ath12k_dp_rx_pdev_reo_cleanup(ab);
 	ath12k_hif_stop(ab);
 	ath12k_wmi_detach(ab);
-	ath12k_dp_rx_pdev_reo_cleanup(ab);
+	ath12k_dp_free(ab);
 
 	/* De-Init of components as needed */
 }
 
+static void ath12k_core_check_bdfext(const struct dmi_header *hdr, void *data)
+{
+	struct ath12k_base *ab = data;
+	const char *magic = ATH12K_SMBIOS_BDF_EXT_MAGIC;
+	struct ath12k_smbios_bdf *smbios = (struct ath12k_smbios_bdf *)hdr;
+	ssize_t copied;
+	size_t len;
+	int i;
+
+	if (ab->qmi.target.bdf_ext[0] != '\0')
+		return;
+
+	if (hdr->type != ATH12K_SMBIOS_BDF_EXT_TYPE)
+		return;
+
+	if (hdr->length != ATH12K_SMBIOS_BDF_EXT_LENGTH) {
+		ath12k_dbg(ab, ATH12K_DBG_BOOT,
+			   "wrong smbios bdf ext type length (%d).\n",
+			   hdr->length);
+		return;
+	}
+
+	if (!smbios->bdf_enabled) {
+		ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name not found.\n");
+		return;
+	}
+
+	/* Only one string exists (per spec) */
+	if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) {
+		ath12k_dbg(ab, ATH12K_DBG_BOOT,
+			   "bdf variant magic does not match.\n");
+		return;
+	}
+
+	len = min_t(size_t,
+		    strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext));
+	for (i = 0; i < len; i++) {
+		if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) {
+			ath12k_dbg(ab, ATH12K_DBG_BOOT,
+				   "bdf variant name contains non ascii chars.\n");
+			return;
+		}
+	}
+
+	/* Copy extension name without magic prefix */
+	copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic),
+			 sizeof(ab->qmi.target.bdf_ext));
+	if (copied < 0) {
+		ath12k_dbg(ab, ATH12K_DBG_BOOT,
+			   "bdf variant string is longer than the buffer can accommodate\n");
+		return;
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_BOOT,
+		   "found and validated bdf variant smbios_type 0x%x bdf %s\n",
+		   ATH12K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext);
+}
+
+int ath12k_core_check_smbios(struct ath12k_base *ab)
+{
+	ab->qmi.target.bdf_ext[0] = '\0';
+	dmi_walk(ath12k_core_check_bdfext, ab);
+
+	if (ab->qmi.target.bdf_ext[0] == '\0')
+		return -ENODATA;
+
+	return 0;
+}
+
 static int ath12k_core_soc_create(struct ath12k_base *ab)
 {
 	int ret;
@@ -387,6 +699,8 @@ static int ath12k_core_soc_create(struct ath12k_base *ab)
 		return ret;
 	}
 
+	ath12k_debugfs_soc_create(ab);
+
 	ret = ath12k_hif_power_up(ab);
 	if (ret) {
 		ath12k_err(ab, "failed to power up :%d\n", ret);
@@ -396,14 +710,16 @@ static int ath12k_core_soc_create(struct ath12k_base *ab)
 	return 0;
 
 err_qmi_deinit:
+	ath12k_debugfs_soc_destroy(ab);
 	ath12k_qmi_deinit_service(ab);
 	return ret;
 }
 
 static void ath12k_core_soc_destroy(struct ath12k_base *ab)
 {
-	ath12k_dp_free(ab);
+	ath12k_hif_power_down(ab, false);
 	ath12k_reg_free(ab);
+	ath12k_debugfs_soc_destroy(ab);
 	ath12k_qmi_deinit_service(ab);
 }
 
@@ -411,30 +727,17 @@ static int ath12k_core_pdev_create(struct ath12k_base *ab)
 {
 	int ret;
 
-	ret = ath12k_mac_register(ab);
-	if (ret) {
-		ath12k_err(ab, "failed register the radio with mac80211: %d\n", ret);
-		return ret;
-	}
-
 	ret = ath12k_dp_pdev_alloc(ab);
 	if (ret) {
 		ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);
-		goto err_mac_unregister;
+		return ret;
 	}
 
 	return 0;
-
-err_mac_unregister:
-	ath12k_mac_unregister(ab);
-
-	return ret;
 }
 
 static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
 {
-	ath12k_mac_unregister(ab);
-
-	ath12k_hif_irq_disable(ab);
 	ath12k_dp_pdev_free(ab);
 }
 
@@ -443,6 +746,8 @@ static int ath12k_core_start(struct ath12k_base *ab,
 {
 	int ret;
 
+	lockdep_assert_held(&ab->core_lock);
+
 	ret = ath12k_wmi_attach(ab);
 	if (ret) {
 		ath12k_err(ab, "failed to attach wmi: %d\n", ret);
@@ -492,23 +797,16 @@ static int ath12k_core_start(struct ath12k_base *ab,
 		goto err_hif_stop;
 	}
 
-	ret = ath12k_mac_allocate(ab);
-	if (ret) {
-		ath12k_err(ab, "failed to create new hw device with mac80211 :%d\n",
-			   ret);
-		goto err_hif_stop;
-	}
-
 	ath12k_dp_cc_config(ab);
 
-	ath12k_dp_pdev_pre_alloc(ab);
-
 	ret = ath12k_dp_rx_pdev_reo_setup(ab);
 	if (ret) {
 		ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
-		goto err_mac_destroy;
+		goto err_hif_stop;
 	}
 
+	ath12k_dp_hal_rx_desc_init(ab);
+
 	ret = ath12k_wmi_cmd_init(ab);
 	if (ret) {
 		ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
@@ -538,12 +836,19 @@ static int ath12k_core_start(struct ath12k_base *ab,
 		goto err_reo_cleanup;
 	}
 
+	ret = ath12k_acpi_start(ab);
+	if (ret)
+		/* ACPI is optional so continue in case of an error */
+		ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", ret);
+
+	if (!test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags))
+		/* Indicate the core start in the appropriate group */
+		ath12k_core_started(ab);
+
 	return 0;
 
 err_reo_cleanup:
 	ath12k_dp_rx_pdev_reo_cleanup(ab);
-err_mac_destroy:
-	ath12k_mac_destroy(ab);
 err_hif_stop:
 	ath12k_hif_stop(ab);
 err_wmi_detach:
@@ -551,6 +856,169 @@ err_wmi_detach:
 	return ret;
 }
 
+static void ath12k_core_device_cleanup(struct ath12k_base *ab)
+{
+	mutex_lock(&ab->core_lock);
+
+	ath12k_hif_irq_disable(ab);
+	ath12k_core_pdev_destroy(ab);
+
+	mutex_unlock(&ab->core_lock);
+}
+
+static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
+{
+	struct ath12k_base *ab;
+	int i;
+
+	lockdep_assert_held(&ag->mutex);
+
+	clear_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);
+
+	ath12k_mac_unregister(ag);
+
+	for (i = ag->num_devices - 1; i >= 0; i--) {
+		ab = ag->ab[i];
+		if (!ab)
+			continue;
+		ath12k_core_device_cleanup(ab);
+	}
+
+	ath12k_mac_destroy(ag);
+}
+
+static int __ath12k_mac_mlo_ready(struct ath12k *ar)
+{
+	int ret;
+
+	ret = ath12k_wmi_mlo_ready(ar);
+	if (ret) {
+		ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n",
+			   ar->pdev_idx, ret);
+		return ret;
+	}
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mlo ready done for pdev %d\n",
+		   ar->pdev_idx);
+
+	return 0;
+}
+
+int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag)
+{
+	struct ath12k_hw *ah;
+	struct ath12k *ar;
+	int ret;
+	int i, j;
+
+	for (i = 0; i < ag->num_hw; i++) {
+		ah = ag->ah[i];
+		if (!ah)
+			continue;
+
+		for_each_ar(ah, ar, j) {
+			ar = &ah->radio[j];
+			ret = __ath12k_mac_mlo_ready(ar);
+			if (ret)
+				goto out;
+		}
+	}
+
+out:
+	return ret;
+}
+
+static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
+{
+	int ret, i;
+
+	if (!ag->mlo_capable || ag->num_devices == 1)
+		return 0;
+
+	ret = ath12k_mac_mlo_setup(ag);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < ag->num_devices; i++)
+		ath12k_dp_partner_cc_init(ag->ab[i]);
+
+	ret = ath12k_mac_mlo_ready(ag);
+	if (ret)
+		goto err_mlo_teardown;
+
+	return 0;
+
+err_mlo_teardown:
+	ath12k_mac_mlo_teardown(ag);
+
+	return ret;
+}
+
+static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag)
+{
+	struct ath12k_base *ab;
+	int ret, i;
+
+	lockdep_assert_held(&ag->mutex);
+
+	if (test_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags))
+		goto core_pdev_create;
+
+	ret = ath12k_mac_allocate(ag);
+	if (WARN_ON(ret))
+		return ret;
+
+	ret = ath12k_core_mlo_setup(ag);
+	if (WARN_ON(ret))
+		goto err_mac_destroy;
+
+	ret = ath12k_mac_register(ag);
+	if (WARN_ON(ret))
+		goto err_mlo_teardown;
+
+	set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);
+
+core_pdev_create:
+	for (i = 0; i < ag->num_devices; i++) {
+		ab = ag->ab[i];
+		if (!ab)
+			continue;
+
+		mutex_lock(&ab->core_lock);
+
+		ret = ath12k_core_pdev_create(ab);
+		if (ret) {
+			ath12k_err(ab, "failed to create pdev core %d\n", ret);
+			mutex_unlock(&ab->core_lock);
+			goto err;
+		}
+
+		ath12k_hif_irq_enable(ab);
+
+		ret = ath12k_core_rfkill_config(ab);
+		if (ret && ret != -EOPNOTSUPP) {
+			mutex_unlock(&ab->core_lock);
+			goto err;
+		}
+
+		mutex_unlock(&ab->core_lock);
+	}
+
+	return 0;
+
+err:
+	ath12k_core_hw_group_stop(ag);
+	return ret;
+
+err_mlo_teardown:
+	ath12k_mac_mlo_teardown(ag);
+
+err_mac_destroy:
+	ath12k_mac_destroy(ag);
+
+	return ret;
+}
+
 static int ath12k_core_start_firmware(struct ath12k_base *ab,
 				      enum ath12k_firmware_mode mode)
 {
@@ -568,9 +1036,37 @@ static int ath12k_core_start_firmware(struct ath12k_base *ab,
 	return ret;
 }
 
+static inline
+bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
+{
+	lockdep_assert_held(&ag->mutex);
+
+	return (ag->num_started == ag->num_devices);
+}
+
+static void ath12k_core_trigger_partner(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_base *partner_ab;
+	bool found = false;
+	int i;
+
+	for (i = 0; i < ag->num_devices; i++) {
+		partner_ab = ag->ab[i];
+		if (!partner_ab)
+			continue;
+
+		if (found)
+			ath12k_qmi_trigger_host_cap(partner_ab);
+
+		found = (partner_ab == ab);
+	}
+}
+
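
Group start is deliberately gated on the last device: ath12k_core_hw_group_start_ready() compares num_started with num_devices, and a device that is not last instead nudges the remaining partners via ath12k_core_trigger_partner(). A toy rendezvous model of the same idea, assuming num_started is bumped by ath12k_core_started() as the comment in ath12k_core_start() indicates:

#include <stdio.h>

struct hw_group {
    int num_devices;
    int num_started;
};

/* same predicate as ath12k_core_hw_group_start_ready() */
static int group_start_ready(const struct hw_group *ag)
{
    return ag->num_started == ag->num_devices;
}

static void device_firmware_ready(struct hw_group *ag, int device_id)
{
    ag->num_started++;	/* what ath12k_core_started() models */

    if (group_start_ready(ag))
        printf("device %d is last: starting the whole group\n", device_id);
    else
        printf("device %d waits and triggers its partners\n", device_id);
}

int main(void)
{
    struct hw_group ag = { .num_devices = 2, .num_started = 0 };

    device_firmware_ready(&ag, 0);	/* waits */
    device_firmware_ready(&ag, 1);	/* starts the group */
    return 0;
}
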
 int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
 {
-	int ret;
+	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
+	int ret, i;
 
 	ret = ath12k_core_start_firmware(ab, ATH12K_FIRMWARE_MODE_NORMAL);
 	if (ret) {
@@ -590,32 +1086,52 @@ int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
 		goto err_firmware_stop;
 	}
 
+	mutex_lock(&ag->mutex);
 	mutex_lock(&ab->core_lock);
+
 	ret = ath12k_core_start(ab, ATH12K_FIRMWARE_MODE_NORMAL);
 	if (ret) {
 		ath12k_err(ab, "failed to start core: %d\n", ret);
 		goto err_dp_free;
 	}
 
-	ret = ath12k_core_pdev_create(ab);
-	if (ret) {
-		ath12k_err(ab, "failed to create pdev core: %d\n", ret);
-		goto err_core_stop;
-	}
-	ath12k_hif_irq_enable(ab);
 	mutex_unlock(&ab->core_lock);
 
+	if (ath12k_core_hw_group_start_ready(ag)) {
+		ret = ath12k_core_hw_group_start(ag);
+		if (ret) {
+			ath12k_warn(ab, "unable to start hw group\n");
+			goto err_core_stop;
+		}
+		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
+	} else {
+		ath12k_core_trigger_partner(ab);
+	}
+
+	mutex_unlock(&ag->mutex);
+
 	return 0;
 
 err_core_stop:
-	ath12k_core_stop(ab);
-	ath12k_mac_destroy(ab);
+	for (i = ag->num_devices - 1; i >= 0; i--) {
+		ab = ag->ab[i];
+		if (!ab)
+			continue;
+
+		mutex_lock(&ab->core_lock);
+		ath12k_core_stop(ab);
+		mutex_unlock(&ab->core_lock);
+	}
+	goto exit;
+
 err_dp_free:
 	ath12k_dp_free(ab);
 	mutex_unlock(&ab->core_lock);
+
 err_firmware_stop:
 	ath12k_qmi_firmware_stop(ab);
 
+exit:
+	mutex_unlock(&ag->mutex);
 	return ret;
 }
 
@@ -624,9 +1140,8 @@ static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
 	int ret;
 
 	mutex_lock(&ab->core_lock);
-	ath12k_hif_irq_disable(ab);
 	ath12k_dp_pdev_free(ab);
-	ath12k_hif_stop(ab);
+	ath12k_ce_cleanup_pipes(ab);
 	ath12k_wmi_detach(ab);
 	ath12k_dp_rx_pdev_reo_cleanup(ab);
 	mutex_unlock(&ab->core_lock);
@@ -655,11 +1170,43 @@ err_hal_srng_deinit:
 	return ret;
 }
 
+static void ath12k_rfkill_work(struct work_struct *work)
+{
+	struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k *ar;
+	struct ath12k_hw *ah;
+	struct ieee80211_hw *hw;
+	bool rfkill_radio_on;
+	int i, j;
+
+	spin_lock_bh(&ab->base_lock);
+	rfkill_radio_on = ab->rfkill_radio_on;
+	spin_unlock_bh(&ab->base_lock);
+
+	for (i = 0; i < ag->num_hw; i++) {
+		ah = ath12k_ag_to_ah(ag, i);
+		if (!ah)
+			continue;
+
+		for (j = 0; j < ah->num_radio; j++) {
+			ar = &ah->radio[j];
+			if (!ar)
+				continue;
+
+			ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
+		}
+
+		hw = ah->hw;
+		wiphy_rfkill_set_hw_state(hw->wiphy, !rfkill_radio_on);
+	}
+}
+
 void ath12k_core_halt(struct ath12k *ar)
 {
 	struct ath12k_base *ab = ar->ab;
 
-	lockdep_assert_held(&ar->conf_mutex);
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
 
 	ar->num_created_vdevs = 0;
 	ar->allocated_vdev_map = 0;
@@ -668,6 +1215,7 @@ void ath12k_core_halt(struct ath12k *ar)
 	ath12k_mac_peer_cleanup_all(ar);
 	cancel_delayed_work_sync(&ar->scan.timeout);
 	cancel_work_sync(&ar->regd_update_work);
+	cancel_work_sync(&ab->rfkill_work);
 
 	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
 	synchronize_rcu();
@@ -677,36 +1225,45 @@ void ath12k_core_halt(struct ath12k *ar)
 
 static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
 {
+	struct ath12k_hw_group *ag = ab->ag;
 	struct ath12k *ar;
-	struct ath12k_pdev *pdev;
-	int i;
+	struct ath12k_hw *ah;
+	int i, j;
 
 	spin_lock_bh(&ab->base_lock);
 	ab->stats.fw_crash_counter++;
 	spin_unlock_bh(&ab->base_lock);
 
-	for (i = 0; i < ab->num_radios; i++) {
-		pdev = &ab->pdevs[i];
-		ar = pdev->ar;
-		if (!ar || ar->state == ATH12K_STATE_OFF)
-			continue;
+	if (ab->is_reset)
+		set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
 
-		ieee80211_stop_queues(ar->hw);
-		ath12k_mac_drain_tx(ar);
-		complete(&ar->scan.started);
-		complete(&ar->scan.completed);
-		complete(&ar->peer_assoc_done);
-		complete(&ar->peer_delete_done);
-		complete(&ar->install_key_done);
-		complete(&ar->vdev_setup_done);
-		complete(&ar->vdev_delete_done);
-		complete(&ar->bss_survey_done);
+	for (i = 0; i < ag->num_hw; i++) {
+		ah = ath12k_ag_to_ah(ag, i);
+		if (!ah || ah->state == ATH12K_HW_STATE_OFF)
+			continue;
 
-		wake_up(&ar->dp.tx_empty_waitq);
-		idr_for_each(&ar->txmgmt_idr,
-			     ath12k_mac_tx_mgmt_pending_free, ar);
-		idr_destroy(&ar->txmgmt_idr);
-		wake_up(&ar->txmgmt_empty_waitq);
+		ieee80211_stop_queues(ah->hw);
+
+		for (j = 0; j < ah->num_radio; j++) {
+			ar = &ah->radio[j];
+
+			ath12k_mac_drain_tx(ar);
+			complete(&ar->scan.started);
+			complete(&ar->scan.completed);
+			complete(&ar->scan.on_channel);
+			complete(&ar->peer_assoc_done);
+			complete(&ar->peer_delete_done);
+			complete(&ar->install_key_done);
+			complete(&ar->vdev_setup_done);
+			complete(&ar->vdev_delete_done);
+			complete(&ar->bss_survey_done);
+
+			wake_up(&ar->dp.tx_empty_waitq);
+			idr_for_each(&ar->txmgmt_idr,
+				     ath12k_mac_tx_mgmt_pending_free, ar);
+			idr_destroy(&ar->txmgmt_idr);
+			wake_up(&ar->txmgmt_empty_waitq);
+		}
 	}
 
 	wake_up(&ab->wmi_ab.tx_credits_wq);
@@ -715,51 +1272,58 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
 
 static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
 {
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_hw *ah;
 	struct ath12k *ar;
-	struct ath12k_pdev *pdev;
-	int i;
+	int i, j;
 
-	for (i = 0; i < ab->num_radios; i++) {
-		pdev = &ab->pdevs[i];
-		ar = pdev->ar;
-		if (!ar || ar->state == ATH12K_STATE_OFF)
+	for (i = 0; i < ag->num_hw; i++) {
+		ah = ath12k_ag_to_ah(ag, i);
+		if (!ah || ah->state == ATH12K_HW_STATE_OFF)
 			continue;
 
-		mutex_lock(&ar->conf_mutex);
+		wiphy_lock(ah->hw->wiphy);
+		mutex_lock(&ah->hw_mutex);
+
+		switch (ah->state) {
+		case ATH12K_HW_STATE_ON:
+			ah->state = ATH12K_HW_STATE_RESTARTING;
+
+			for (j = 0; j < ah->num_radio; j++) {
+				ar = &ah->radio[j];
+				ath12k_core_halt(ar);
+			}
 
-		switch (ar->state) {
-		case ATH12K_STATE_ON:
-			ar->state = ATH12K_STATE_RESTARTING;
-			ath12k_core_halt(ar);
-			ieee80211_restart_hw(ar->hw);
 			break;
-		case ATH12K_STATE_OFF:
+		case ATH12K_HW_STATE_OFF:
 			ath12k_warn(ab,
-				    "cannot restart radio %d that hasn't been started\n",
+				    "cannot restart hw %d that hasn't been started\n",
 				    i);
 			break;
-		case ATH12K_STATE_RESTARTING:
+		case ATH12K_HW_STATE_RESTARTING:
 			break;
-		case ATH12K_STATE_RESTARTED:
-			ar->state = ATH12K_STATE_WEDGED;
+		case ATH12K_HW_STATE_RESTARTED:
+			ah->state = ATH12K_HW_STATE_WEDGED;
 			fallthrough;
-		case ATH12K_STATE_WEDGED:
+		case ATH12K_HW_STATE_WEDGED:
 			ath12k_warn(ab,
-				    "device is wedged, will not restart radio %d\n", i);
+				    "device is wedged, will not restart hw %d\n", i);
 			break;
 		}
-		mutex_unlock(&ar->conf_mutex);
+
+		mutex_unlock(&ah->hw_mutex);
+		wiphy_unlock(ah->hw->wiphy);
 	}
+
 	complete(&ab->driver_recovery);
 }
 
 static void ath12k_core_restart(struct work_struct *work)
 {
 	struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
-	int ret;
-
-	if (!ab->is_reset)
-		ath12k_core_pre_reconfigure_recovery(ab);
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_hw *ah;
+	int ret, i;
 
 	ret = ath12k_core_reconfigure_on_crash(ab);
 	if (ret) {
@@ -767,11 +1331,22 @@ static void ath12k_core_restart(struct work_struct *work)
 		return;
 	}
 
-	if (ab->is_reset)
-		complete_all(&ab->reconfigure_complete);
+	if (ab->is_reset) {
+		if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
+			atomic_dec(&ab->reset_count);
+			complete(&ab->reset_complete);
+			ab->is_reset = false;
+			atomic_set(&ab->fail_cont_count, 0);
+			ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
+		}
+
+		for (i = 0; i < ag->num_hw; i++) {
+			ah = ath12k_ag_to_ah(ab->ag, i);
+			ieee80211_restart_hw(ah->hw);
+		}
+	}
 
-	if (!ab->is_reset)
-		ath12k_core_post_reconfigure_recovery(ab);
+	complete(&ab->restart_completed);
 }
 
 static void ath12k_core_reset(struct work_struct *work)
@@ -780,7 +1355,7 @@ static void ath12k_core_reset(struct work_struct *work)
 	int reset_count, fail_cont_count;
 	long time_left;
 
-	if (!(test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))) {
+	if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
 		ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
 		return;
 	}
@@ -825,20 +1400,17 @@ static void ath12k_core_reset(struct work_struct *work)
 	ab->is_reset = true;
 	atomic_set(&ab->recovery_count, 0);
 
+	ath12k_coredump_collect(ab);
 	ath12k_core_pre_reconfigure_recovery(ab);
 
-	reinit_completion(&ab->reconfigure_complete);
 	ath12k_core_post_reconfigure_recovery(ab);
 
-	reinit_completion(&ab->recovery_start);
-	atomic_set(&ab->recovery_start_count, 0);
-
-	ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n");
-
-	time_left = wait_for_completion_timeout(&ab->recovery_start,
-						ATH12K_RECOVER_START_TIMEOUT_HZ);
+	ath12k_hif_irq_disable(ab);
+	ath12k_hif_ce_irq_disable(ab);
 
-	ath12k_hif_power_down(ab);
+	ath12k_hif_power_down(ab, false);
 	ath12k_hif_power_up(ab);
 
 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
@@ -854,34 +1426,458 @@ int ath12k_core_pre_init(struct ath12k_base *ab)
 		return ret;
 	}
 
+	ath12k_fw_map(ab);
+
 	return 0;
 }
 
-int ath12k_core_init(struct ath12k_base *ab)
+static int ath12k_core_panic_handler(struct notifier_block *nb,
+				     unsigned long action, void *data)
 {
-	int ret;
+	struct ath12k_base *ab = container_of(nb, struct ath12k_base,
+					      panic_nb);
 
-	ret = ath12k_core_soc_create(ab);
-	if (ret) {
-		ath12k_err(ab, "failed to create soc core: %d\n", ret);
-		return ret;
+	return ath12k_hif_panic_handler(ab);
+}
+
+static int ath12k_core_panic_notifier_register(struct ath12k_base *ab)
+{
+	ab->panic_nb.notifier_call = ath12k_core_panic_handler;
+
+	return atomic_notifier_chain_register(&panic_notifier_list,
+					      &ab->panic_nb);
+}
+
+static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
+{
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+					 &ab->panic_nb);
+}
+
+static inline
+bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
+{
+	lockdep_assert_held(&ag->mutex);
+
+	return (ag->num_probed == ag->num_devices);
+}
+
+static struct ath12k_hw_group *ath12k_core_hw_group_alloc(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag;
+	int count = 0;
+
+	lockdep_assert_held(&ath12k_hw_group_mutex);
+
+	list_for_each_entry(ag, &ath12k_hw_group_list, list)
+		count++;
+
+	ag = kzalloc(sizeof(*ag), GFP_KERNEL);
+	if (!ag)
+		return NULL;
+
+	ag->id = count;
+	list_add(&ag->list, &ath12k_hw_group_list);
+	mutex_init(&ag->mutex);
+	ag->mlo_capable = false;
+
+	return ag;
+}
+
+static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
+{
+	mutex_lock(&ath12k_hw_group_mutex);
+
+	list_del(&ag->list);
+	kfree(ag);
+
+	mutex_unlock(&ath12k_hw_group_mutex);
+}
+
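
Binding devices to a group is reference counted: ath12k_core_hw_group_assign() below bumps num_probed for each probed device, and ath12k_core_hw_group_unassign() frees the group once the last device drops out. A compact model of that lifecycle, simplified and without the driver's locking:

#include <stdio.h>
#include <stdlib.h>

struct hw_group {
    int num_devices;
    int num_probed;
};

static struct hw_group *group_assign(struct hw_group *ag)
{
    if (!ag)
        ag = calloc(1, sizeof(*ag));	/* ath12k_core_hw_group_alloc() */
    if (ag)
        ag->num_probed++;		/* one reference per probed device */
    return ag;
}

static void group_unassign(struct hw_group *ag)
{
    if (ag && --ag->num_probed == 0) {
        printf("last device gone: freeing group\n");
        free(ag);			/* ath12k_core_hw_group_free() */
    }
}

int main(void)
{
    struct hw_group *ag = group_assign(NULL);	/* first device creates group */

    ag = group_assign(ag);			/* second device joins it */
    group_unassign(ag);				/* refcount 2 -> 1 */
    group_unassign(ag);				/* refcount 1 -> 0, freed */
    return 0;
}
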
(!ab->dev->of_node) + return NULL; + + list_for_each_entry(ag, &ath12k_hw_group_list, list) + for (i = 0; i < ag->num_devices; i++) + if (ag->wsi_node[i] == ab->dev->of_node) + return ag; + + return NULL; +} + +static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag, + struct ath12k_base *ab) +{ + struct device_node *wsi_dev = ab->dev->of_node, *next_wsi_dev; + struct device_node *tx_endpoint, *next_rx_endpoint; + int device_count = 0; + + next_wsi_dev = wsi_dev; + + if (!next_wsi_dev) + return -ENODEV; + + do { + ag->wsi_node[device_count] = next_wsi_dev; + + tx_endpoint = of_graph_get_endpoint_by_regs(next_wsi_dev, 0, -1); + if (!tx_endpoint) { + of_node_put(next_wsi_dev); + return -ENODEV; + } + + next_rx_endpoint = of_graph_get_remote_endpoint(tx_endpoint); + if (!next_rx_endpoint) { + of_node_put(next_wsi_dev); + of_node_put(tx_endpoint); + return -ENODEV; + } + + of_node_put(tx_endpoint); + of_node_put(next_wsi_dev); + + next_wsi_dev = of_graph_get_port_parent(next_rx_endpoint); + if (!next_wsi_dev) { + of_node_put(next_rx_endpoint); + return -ENODEV; + } + + of_node_put(next_rx_endpoint); + + device_count++; + if (device_count > ATH12K_MAX_SOCS) { + ath12k_warn(ab, "device count in DT %d is more than limit %d\n", + device_count, ATH12K_MAX_SOCS); + of_node_put(next_wsi_dev); + return -EINVAL; + } + } while (wsi_dev != next_wsi_dev); + + of_node_put(next_wsi_dev); + ag->num_devices = device_count; + + return 0; +} + +static int ath12k_core_get_wsi_index(struct ath12k_hw_group *ag, + struct ath12k_base *ab) +{ + int i, wsi_controller_index = -1, node_index = -1; + bool control; + + for (i = 0; i < ag->num_devices; i++) { + control = of_property_read_bool(ag->wsi_node[i], "qcom,wsi-controller"); + if (control) + wsi_controller_index = i; + + if (ag->wsi_node[i] == ab->dev->of_node) + node_index = i; } + if (wsi_controller_index == -1) { + ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi controller is not defined in dt"); + return -EINVAL; + } + + if (node_index == -1) { + ath12k_dbg(ab, ATH12K_DBG_BOOT, "unable to get WSI node index"); + return -EINVAL; + } + + ab->wsi_info.index = (ag->num_devices + node_index - wsi_controller_index) % + ag->num_devices; + return 0; } -void ath12k_core_deinit(struct ath12k_base *ab) +static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab) { - mutex_lock(&ab->core_lock); + struct ath12k_wsi_info *wsi = &ab->wsi_info; + struct ath12k_hw_group *ag; + + lockdep_assert_held(&ath12k_hw_group_mutex); + + /* The grouping of multiple devices will be done based on device tree file. + * The platforms that do not have any valid group information would have + * each device to be part of its own invalid group. + * + * We use group id ATH12K_INVALID_GROUP_ID for single device group + * which didn't have dt entry or wrong dt entry, there could be many + * groups with same group id, i.e ATH12K_INVALID_GROUP_ID. 
So + * default group id of ATH12K_INVALID_GROUP_ID combined with + * num devices in ath12k_hw_group determines if the group is + * multi device or single device group + */ - ath12k_core_pdev_destroy(ab); - ath12k_core_stop(ab); + ag = ath12k_core_hw_group_find_by_dt(ab); + if (!ag) { + ag = ath12k_core_hw_group_alloc(ab); + if (!ag) { + ath12k_warn(ab, "unable to create new hw group\n"); + return NULL; + } - mutex_unlock(&ab->core_lock); + if (ath12k_core_get_wsi_info(ag, ab) || + ath12k_core_get_wsi_index(ag, ab)) { + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "unable to get wsi info from dt, grouping single device"); + ag->id = ATH12K_INVALID_GROUP_ID; + ag->num_devices = 1; + memset(ag->wsi_node, 0, sizeof(ag->wsi_node)); + wsi->index = 0; + } + + goto exit; + } else if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) { + ath12k_dbg(ab, ATH12K_DBG_BOOT, "group id %d in unregister state\n", + ag->id); + goto invalid_group; + } else { + if (ath12k_core_get_wsi_index(ag, ab)) + goto invalid_group; + goto exit; + } - ath12k_hif_power_down(ab); - ath12k_mac_destroy(ab); - ath12k_core_soc_destroy(ab); +invalid_group: + ag = ath12k_core_hw_group_alloc(ab); + if (!ag) { + ath12k_warn(ab, "unable to create new hw group\n"); + return NULL; + } + + ag->id = ATH12K_INVALID_GROUP_ID; + ag->num_devices = 1; + wsi->index = 0; + + ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n"); + +exit: + if (ag->num_probed >= ag->num_devices) { + ath12k_warn(ab, "unable to add new device to group, max limit reached\n"); + goto invalid_group; + } + + ab->device_id = ag->num_probed++; + ag->ab[ab->device_id] = ab; + ab->ag = ag; + + ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi group-id %d num-devices %d index %d", + ag->id, ag->num_devices, wsi->index); + + return ag; +} + +void ath12k_core_hw_group_unassign(struct ath12k_base *ab) +{ + struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab); + u8 device_id = ab->device_id; + int num_probed; + + if (!ag) + return; + + mutex_lock(&ag->mutex); + + if (WARN_ON(device_id >= ag->num_devices)) { + mutex_unlock(&ag->mutex); + return; + } + + if (WARN_ON(ag->ab[device_id] != ab)) { + mutex_unlock(&ag->mutex); + return; + } + + ag->ab[device_id] = NULL; + ab->ag = NULL; + ab->device_id = ATH12K_INVALID_DEVICE_ID; + + if (ag->num_probed) + ag->num_probed--; + + num_probed = ag->num_probed; + + mutex_unlock(&ag->mutex); + + if (!num_probed) + ath12k_core_hw_group_free(ag); +} + +static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag) +{ + struct ath12k_base *ab; + int i; + + if (WARN_ON(!ag)) + return; + + for (i = 0; i < ag->num_devices; i++) { + ab = ag->ab[i]; + if (!ab) + continue; + + ath12k_core_soc_destroy(ab); + } +} + +static void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag) +{ + struct ath12k_base *ab; + int i; + + if (!ag) + return; + + mutex_lock(&ag->mutex); + + if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) { + mutex_unlock(&ag->mutex); + return; + } + + set_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags); + + ath12k_core_hw_group_stop(ag); + + for (i = 0; i < ag->num_devices; i++) { + ab = ag->ab[i]; + if (!ab) + continue; + + mutex_lock(&ab->core_lock); + ath12k_core_stop(ab); + mutex_unlock(&ab->core_lock); + } + + mutex_unlock(&ag->mutex); +} + +static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag) +{ + struct ath12k_base *ab; + int i, ret; + + lockdep_assert_held(&ag->mutex); + + for (i = 0; i < ag->num_devices; i++) { + ab = ag->ab[i]; + if (!ab) + continue; + + mutex_lock(&ab->core_lock); + + ret = 
+static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i, ret;
+
+ lockdep_assert_held(&ag->mutex);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ mutex_lock(&ab->core_lock);
+
+ ret = ath12k_core_soc_create(ab);
+ if (ret) {
+ mutex_unlock(&ab->core_lock);
+ ath12k_err(ab, "failed to create soc core: %d\n", ret);
+ return ret;
+ }
+
+ mutex_unlock(&ab->core_lock);
+ }
+
+ return 0;
+}
+
+void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ lockdep_assert_held(&ag->mutex);
+
+ /* If more than one device is grouped, then inter-device MLO
+ * functionality can still work independently of whether each
+ * device internally supports single_chip_mlo or not.
+ * Only when there is one device does it depend on whether the
+ * device can support intra-chip MLO or not.
+ */
+ if (ag->num_devices > 1) {
+ ag->mlo_capable = true;
+ } else {
+ ab = ag->ab[0];
+ ag->mlo_capable = ab->single_chip_mlo_supp;
+
+ /* WCN chipsets do not advertise this in firmware features,
+ * hence skip checking.
+ */
+ if (ab->hw_params->def_num_link)
+ return;
+ }
+
+ if (!ag->mlo_capable)
+ return;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ /* Even if one device's firmware feature indicates MLO is
+ * unsupported, make MLO unsupported for the whole group.
+ */
+ if (!test_bit(ATH12K_FW_FEATURE_MLO, ab->fw.fw_features)) {
+ ag->mlo_capable = false;
+ return;
+ }
+ }
+}
+
+int ath12k_core_init(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag;
+ int ret;
+
+ ret = ath12k_core_panic_notifier_register(ab);
+ if (ret)
+ ath12k_warn(ab, "failed to register panic handler: %d\n", ret);
+
+ mutex_lock(&ath12k_hw_group_mutex);
+
+ ag = ath12k_core_hw_group_assign(ab);
+ if (!ag) {
+ mutex_unlock(&ath12k_hw_group_mutex);
+ ath12k_warn(ab, "unable to get hw group\n");
+ return -ENODEV;
+ }
+
+ mutex_unlock(&ath12k_hw_group_mutex);
+
+ mutex_lock(&ag->mutex);
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n",
+ ag->num_devices, ag->num_probed);
+
+ if (ath12k_core_hw_group_create_ready(ag)) {
+ ret = ath12k_core_hw_group_create(ag);
+ if (ret) {
+ mutex_unlock(&ag->mutex);
+ ath12k_warn(ab, "unable to create hw group\n");
+ goto err;
+ }
+ }
+
+ mutex_unlock(&ag->mutex);
+
+ return 0;
+
+err:
+ ath12k_core_hw_group_destroy(ab->ag);
+ ath12k_core_hw_group_unassign(ab);
+ return ret;
+}
+
+void ath12k_core_deinit(struct ath12k_base *ab)
+{
+ ath12k_core_panic_notifier_unregister(ab);
+ ath12k_core_hw_group_cleanup(ab->ag);
+ ath12k_core_hw_group_destroy(ab->ag);
+ ath12k_core_hw_group_unassign(ab);
}

void ath12k_core_free(struct ath12k_base *ab)
@@ -914,19 +1910,34 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
mutex_init(&ab->core_lock);
spin_lock_init(&ab->base_lock);
init_completion(&ab->reset_complete);
- init_completion(&ab->reconfigure_complete);
- init_completion(&ab->recovery_start);
INIT_LIST_HEAD(&ab->peers);
init_waitqueue_head(&ab->peer_mapping_wq);
init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
INIT_WORK(&ab->restart_work, ath12k_core_restart);
INIT_WORK(&ab->reset_work, ath12k_core_reset);
+ INIT_WORK(&ab->rfkill_work, ath12k_rfkill_work);
+ INIT_WORK(&ab->dump_work, ath12k_coredump_upload);
+
timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
+ init_completion(&ab->restart_completed);
+ init_completion(&ab->wow.wakeup_completed);

ab->dev = dev;
ab->hif.bus = bus;
+ ab->qmi.num_radios = U8_MAX;
+ ab->single_chip_mlo_supp = false;
+
+ /* Device index used to identify the devices in a group.
+ *
+ * In Intra-device MLO, only one device is present in a group,
+ * so it is always zero.
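The decision implemented by ath12k_core_hw_group_set_mlo_capable() above condenses to three rules: a multi-device group starts out MLO-capable, a single device falls back to its intra-chip flag, and any device lacking the firmware MLO feature vetoes the whole group (except for WCN-style devices, which skip the firmware check). A simplified sketch using toy types; every name below is an illustrative stand-in, not the driver's API:

#include <stdbool.h>
#include <stdio.h>

struct toy_dev {
	bool single_chip_mlo_supp; /* stands in for ab->single_chip_mlo_supp */
	bool fw_feature_mlo;       /* stands in for ATH12K_FW_FEATURE_MLO */
	bool has_def_num_link;     /* stands in for ab->hw_params->def_num_link != 0 */
};

static bool toy_group_mlo_capable(const struct toy_dev *devs, int num_devices)
{
	bool capable = (num_devices > 1) ? true : devs[0].single_chip_mlo_supp;

	/* single WCN-style device: firmware features are not consulted */
	if (num_devices == 1 && devs[0].has_def_num_link)
		return capable;

	if (!capable)
		return false;

	/* one device without the firmware feature vetoes the whole group */
	for (int i = 0; i < num_devices; i++)
		if (!devs[i].fw_feature_mlo)
			return false;

	return true;
}

int main(void)
{
	struct toy_dev devs[2] = {
		{ .fw_feature_mlo = true },
		{ .fw_feature_mlo = true },
	};

	printf("group mlo capable: %d\n", toy_group_mlo_capable(devs, 2)); /* 1 */
	return 0;
}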
+ * + * In Inter-device MLO, Multiple device present in a group, + * expect non-zero value. + */ + ab->device_id = 0; return ab; @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH12K_CORE_H @@ -11,6 +11,10 @@ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/bitfield.h> +#include <linux/dmi.h> +#include <linux/ctype.h> +#include <linux/firmware.h> +#include <linux/panic_notifier.h> #include "qmi.h" #include "htc.h" #include "wmi.h" @@ -22,6 +26,11 @@ #include "hal_rx.h" #include "reg.h" #include "dbring.h" +#include "fw.h" +#include "acpi.h" +#include "wow.h" +#include "debugfs_htt_stats.h" +#include "coredump.h" #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) @@ -32,7 +41,17 @@ /* Pending management packets threshold for dropping probe responses */ #define ATH12K_PRB_RSP_DROP_THRESHOLD ((ATH12K_TX_MGMT_TARGET_MAX_SUPPORT_WMI * 3) / 4) +/* SMBIOS type containing Board Data File Name Extension */ +#define ATH12K_SMBIOS_BDF_EXT_TYPE 0xF8 + +/* SMBIOS type structure length (excluding strings-set) */ +#define ATH12K_SMBIOS_BDF_EXT_LENGTH 0x9 + +/* The magic used by QCA spec */ +#define ATH12K_SMBIOS_BDF_EXT_MAGIC "BDF_" + #define ATH12K_INVALID_HW_MAC_ID 0xFF +#define ATH12K_CONNECTION_LOSS_HZ (3 * HZ) #define ATH12K_RX_RATE_TABLE_NUM 320 #define ATH12K_RX_RATE_TABLE_11AX_NUM 576 @@ -44,6 +63,19 @@ #define ATH12K_RECONFIGURE_TIMEOUT_HZ (10 * HZ) #define ATH12K_RECOVER_START_TIMEOUT_HZ (20 * HZ) +#define ATH12K_MAX_SOCS 3 +#define ATH12K_GROUP_MAX_RADIO (ATH12K_MAX_SOCS * MAX_RADIOS) +#define ATH12K_INVALID_GROUP_ID 0xFF +#define ATH12K_INVALID_DEVICE_ID 0xFF + +#define ATH12K_MAX_MLO_PEERS 256 +#define ATH12K_MLO_PEER_ID_INVALID 0xFFFF + +enum ath12k_bdf_search { + ATH12K_BDF_SEARCH_DEFAULT, + ATH12K_BDF_SEARCH_BUS_AND_BOARD, +}; + enum wme_ac { WME_AC_BE, WME_AC_BK, @@ -71,6 +103,14 @@ static inline enum wme_ac ath12k_tid_to_ac(u32 tid) WME_AC_VO); } +static inline u64 ath12k_le32hilo_to_u64(__le32 hi, __le32 lo) +{ + u64 hi64 = le32_to_cpu(hi); + u64 lo64 = le32_to_cpu(lo); + + return (hi64 << 32) | lo64; +} + enum ath12k_skb_flags { ATH12K_SKB_HW_80211_ENCAP = BIT(0), ATH12K_SKB_CIPHER_SET = BIT(1), @@ -83,6 +123,7 @@ struct ath12k_skb_cb { dma_addr_t paddr_ext_desc; u32 cipher; u8 flags; + u8 link_id; }; struct ath12k_skb_rxcb { @@ -95,7 +136,7 @@ struct ath12k_skb_rxcb { struct hal_rx_desc *rx_desc; u8 err_rel_src; u8 err_code; - u8 mac_id; + u8 hw_link_id; u8 unmapped; u8 is_frag; u8 tid; @@ -126,9 +167,16 @@ struct ath12k_ext_irq_grp { u32 grp_id; u64 timestamp; struct napi_struct napi; - struct net_device napi_ndev; + struct net_device *napi_ndev; }; +struct ath12k_smbios_bdf { + struct dmi_header hdr; + u32 padding; + u8 bdf_enabled; + u8 bdf_ext[]; +} __packed; + #define HEHANDLE_CAP_PHYINFO_SIZE 3 #define HECAP_PHYINFO_SIZE 9 #define HECAP_MACINFO_SIZE 5 @@ -153,8 +201,6 @@ struct ath12k_he { u32 heop_param; }; -#define MAX_RADIOS 3 - enum { WMI_HOST_TP_SCALE_MAX = 0, WMI_HOST_TP_SCALE_50 = 1, @@ -171,8 +217,13 @@ enum ath12k_scan_state { ATH12K_SCAN_ABORTING, }; +enum ath12k_hw_group_flags { + ATH12K_GROUP_FLAG_REGISTERED, + ATH12K_GROUP_FLAG_UNREGISTER, +}; + enum ath12k_dev_flags { - ATH12K_CAC_RUNNING, + ATH12K_FLAG_CAC_RUNNING, ATH12K_FLAG_CRASH_FLUSH, ATH12K_FLAG_RAW_MODE, 
ATH12K_FLAG_HW_CRYPTO_DISABLED, @@ -181,16 +232,39 @@ enum ath12k_dev_flags { ATH12K_FLAG_REGISTERED, ATH12K_FLAG_QMI_FAIL, ATH12K_FLAG_HTC_SUSPEND_COMPLETE, + ATH12K_FLAG_CE_IRQ_ENABLED, + ATH12K_FLAG_EXT_IRQ_ENABLED, + ATH12K_FLAG_QMI_FW_READY_COMPLETE, }; -enum ath12k_monitor_flags { - ATH12K_FLAG_MONITOR_ENABLED, +struct ath12k_tx_conf { + bool changed; + u16 ac; + struct ieee80211_tx_queue_params tx_queue_params; }; -struct ath12k_vif { +struct ath12k_key_conf { + enum set_key_cmd cmd; + struct list_head list; + struct ieee80211_sta *sta; + struct ieee80211_key_conf *key; +}; + +struct ath12k_vif_cache { + struct ath12k_tx_conf tx_conf; + struct ath12k_key_conf key_conf; + u32 bss_conf_changed; +}; + +struct ath12k_rekey_data { + u8 kck[NL80211_KCK_LEN]; + u8 kek[NL80211_KCK_LEN]; + u64 replay_ctr; + bool enable_offload; +}; + +struct ath12k_link_vif { u32 vdev_id; - enum wmi_vdev_type vdev_type; - enum wmi_vdev_subtype vdev_subtype; u32 beacon_interval; u32 dtim_period; u16 ast_hash; @@ -200,13 +274,38 @@ struct ath12k_vif { u8 search_type; struct ath12k *ar; - struct ieee80211_vif *vif; int bank_id; u8 vdev_id_check_en; struct wmi_wmm_params_all_arg wmm_params; struct list_head list; + + bool is_created; + bool is_started; + bool is_up; + u8 bssid[ETH_ALEN]; + struct cfg80211_bitrate_mask bitrate_mask; + struct delayed_work connection_loss_work; + int num_legacy_stations; + int rtscts_prot_mode; + int txpower; + bool rsnie_present; + bool wpaie_present; + struct ieee80211_chanctx_conf chanctx; + u8 vdev_stats_id; + u32 punct_bitmap; + u8 link_id; + struct ath12k_vif *ahvif; + struct ath12k_rekey_data rekey_data; +}; + +struct ath12k_vif { + enum wmi_vdev_type vdev_type; + enum wmi_vdev_subtype vdev_subtype; + struct ieee80211_vif *vif; + struct ath12k_hw *ah; + union { struct { u32 uapsd; @@ -224,26 +323,30 @@ struct ath12k_vif { } ap; } u; - bool is_started; - bool is_up; u32 aid; - u8 bssid[ETH_ALEN]; - struct cfg80211_bitrate_mask bitrate_mask; - int num_legacy_stations; - int rtscts_prot_mode; - int txpower; - bool rsnie_present; - bool wpaie_present; - struct ieee80211_chanctx_conf chanctx; u32 key_cipher; u8 tx_encap_type; - u8 vdev_stats_id; - u32 punct_bitmap; + bool ps; + + struct ath12k_link_vif deflink; + struct ath12k_link_vif __rcu *link[ATH12K_NUM_MAX_LINKS]; + struct ath12k_vif_cache *cache[IEEE80211_MLD_MAX_NUM_LINKS]; + /* indicates bitmap of link vif created in FW */ + u16 links_map; + u8 last_scan_link; + + /* Must be last - ends in a flexible-array member. + * + * FIXME: Driver should not copy struct ieee80211_chanctx_conf, + * especially because it has a flexible array. Find a better way. 
+ */ + struct ieee80211_chanctx_conf chanctx; }; struct ath12k_vif_iter { u32 vdev_id; - struct ath12k_vif *arvif; + struct ath12k *ar; + struct ath12k_link_vif *arvif; }; #define HAL_AST_IDX_INVALID 0xFFFF @@ -278,10 +381,6 @@ struct ath12k_rx_peer_stats { u64 non_ampdu_msdu_count; u64 stbc_count; u64 beamformed_count; - u64 mcs_count[HAL_RX_MAX_MCS + 1]; - u64 nss_count[HAL_RX_MAX_NSS]; - u64 bw_count[HAL_RX_BW_MAX]; - u64 gi_count[HAL_RX_GI_MAX]; u64 coding_count[HAL_RX_SU_MU_CODING_MAX]; u64 tid_count[IEEE80211_NUM_TIDS + 1]; u64 pream_cnt[HAL_RX_PREAMBLE_MAX]; @@ -378,39 +477,63 @@ struct ath12k_wbm_tx_stats { u64 wbm_tx_comp_stats[HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX]; }; -struct ath12k_sta { - struct ath12k_vif *arvif; +struct ath12k_link_sta { + struct ath12k_link_vif *arvif; + struct ath12k_sta *ahsta; + + /* link address similar to ieee80211_link_sta */ + u8 addr[ETH_ALEN]; /* the following are protected by ar->data_lock */ u32 changed; /* IEEE80211_RC_* */ u32 bw; u32 nss; u32 smps; - enum hal_pn_type pn_type; - struct work_struct update_wk; + struct wiphy_work update_wk; struct rate_info txrate; struct rate_info last_txrate; u64 rx_duration; u64 tx_duration; u8 rssi_comb; + u8 link_id; struct ath12k_rx_peer_stats *rx_stats; struct ath12k_wbm_tx_stats *wbm_tx_stats; u32 bw_prev; + + /* For now the assoc link will be considered primary */ + bool is_assoc_link; + + /* for firmware use only */ + u8 link_idx; +}; + +struct ath12k_sta { + struct ath12k_vif *ahvif; + enum hal_pn_type pn_type; + struct ath12k_link_sta deflink; + struct ath12k_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS]; + /* indicates bitmap of link sta created in FW */ + u16 links_map; + u8 assoc_link_id; + u16 ml_peer_id; + u8 num_peer; + + enum ieee80211_sta_state state; }; #define ATH12K_MIN_5G_FREQ 4150 -#define ATH12K_MIN_6G_FREQ 5945 +#define ATH12K_MIN_6G_FREQ 5925 #define ATH12K_MAX_6G_FREQ 7115 -#define ATH12K_NUM_CHANS 100 +#define ATH12K_NUM_CHANS 101 #define ATH12K_MAX_5G_CHAN 173 -enum ath12k_state { - ATH12K_STATE_OFF, - ATH12K_STATE_ON, - ATH12K_STATE_RESTARTING, - ATH12K_STATE_RESTARTED, - ATH12K_STATE_WEDGED, +enum ath12k_hw_state { + ATH12K_HW_STATE_OFF, + ATH12K_HW_STATE_ON, + ATH12K_HW_STATE_RESTARTING, + ATH12K_HW_STATE_RESTARTED, + ATH12K_HW_STATE_WEDGED, /* Add other states as required */ }; @@ -425,6 +548,19 @@ struct ath12k_fw_stats { struct list_head bcn; }; +struct ath12k_dbg_htt_stats { + enum ath12k_dbg_htt_ext_stats_type type; + u32 cfg_param[4]; + u8 reset; + struct debug_htt_stats_req *stats_req; +}; + +struct ath12k_debug { + struct dentry *debugfs_pdev; + struct dentry *debugfs_pdev_symlink; + struct ath12k_dbg_htt_stats htt_stats; +}; + struct ath12k_per_peer_tx_stats { u32 succ_bytes; u32 retry_bytes; @@ -448,15 +584,13 @@ struct ath12k_per_peer_tx_stats { struct ath12k { struct ath12k_base *ab; struct ath12k_pdev *pdev; - struct ieee80211_hw *hw; - struct ieee80211_ops *ops; + struct ath12k_hw *ah; struct ath12k_wmi_pdev *wmi; struct ath12k_pdev_dp dp; u8 mac_addr[ETH_ALEN]; u32 ht_cap_info; u32 vht_cap_info; struct ath12k_he ar_he; - enum ath12k_state state; bool supports_6ghz; struct { struct completion started; @@ -465,9 +599,10 @@ struct ath12k { struct delayed_work timeout; enum ath12k_scan_state state; bool is_roc; - int vdev_id; int roc_freq; bool roc_notify; + struct wiphy_work vdev_clean_wk; + struct ath12k_link_vif *arvif; } scan; struct { @@ -478,7 +613,6 @@ struct ath12k { unsigned long dev_flags; unsigned int filter_flags; - unsigned long monitor_flags; u32 
min_tx_power; u32 max_tx_power; u32 txpower_limit_2g; @@ -488,13 +622,9 @@ struct ath12k { u32 chan_tx_pwr; u32 num_stations; u32 max_num_stations; - bool monitor_present; - /* To synchronize concurrent synchronous mac80211 callback operations, - * concurrent debugfs configuration and concurrent FW statistics events. - */ - struct mutex conf_mutex; + /* protects the radio specific data like debug stats, ppdu_stats_info stats, - * vdev_stop_status info, scan data, ath12k_sta info, ath12k_vif info, + * vdev_stop_status info, scan data, ath12k_sta info, ath12k_link_vif info, * channel context data, survey info, test mode data. */ spinlock_t data_lock; @@ -513,6 +643,7 @@ struct ath12k { /* pdev_idx starts from 0 whereas pdev->pdev_id starts with 1 */ u8 pdev_idx; u8 lmac_id; + u8 hw_link_id; struct completion peer_assoc_done; struct completion peer_delete_done; @@ -554,9 +685,12 @@ struct ath12k { struct work_struct regd_update_work; - struct work_struct wmi_mgmt_tx_work; + struct wiphy_work wmi_mgmt_tx_work; struct sk_buff_head wmi_mgmt_tx_queue; + struct ath12k_wow wow; + struct completion target_suspend; + bool target_suspend_ack; struct ath12k_per_peer_tx_stats peer_tx_stats; struct list_head ppdu_stats_info; u32 ppdu_stat_list_depth; @@ -564,12 +698,45 @@ struct ath12k { struct ath12k_per_peer_tx_stats cached_stats; u32 last_ppdu_id; u32 cached_ppdu_id; +#ifdef CONFIG_ATH12K_DEBUGFS + struct ath12k_debug debug; +#endif bool dfs_block_radar_events; bool monitor_conf_enabled; bool monitor_vdev_created; bool monitor_started; int monitor_vdev_id; + + struct wiphy_radio_freq_range freq_range; + + bool nlo_enabled; + + struct completion mlo_setup_done; + u32 mlo_setup_status; +}; + +struct ath12k_hw { + struct ieee80211_hw *hw; + struct device *dev; + + /* Protect the write operation of the hardware state ath12k_hw::state + * between hardware start<=>reconfigure<=>stop transitions. + */ + struct mutex hw_mutex; + enum ath12k_hw_state state; + bool regd_updated; + bool use_6ghz_regd; + + u8 num_radio; + + DECLARE_BITMAP(free_ml_peer_id_map, ATH12K_MAX_MLO_PEERS); + + /* protected by wiphy_lock() */ + struct list_head ml_peers; + + /* Keep last */ + struct ath12k radio[] __aligned(sizeof(void *)); }; struct ath12k_band_cap { @@ -602,6 +769,8 @@ struct ath12k_pdev_cap { u32 tx_chain_mask_shift; u32 rx_chain_mask_shift; struct ath12k_band_cap band[NUM_NL80211_BANDS]; + u32 eml_cap; + u32 mld_cap; }; struct mlo_timestamp { @@ -618,6 +787,7 @@ struct mlo_timestamp { struct ath12k_pdev { struct ath12k *ar; u32 pdev_id; + u32 hw_link_id; struct ath12k_pdev_cap cap; u8 mac_addr[ETH_ALEN]; struct mlo_timestamp timestamp; @@ -653,6 +823,53 @@ struct ath12k_soc_dp_stats { struct ath12k_soc_dp_tx_err_stats tx_err; }; +struct ath12k_mlo_memory { + struct target_mem_chunk chunk[ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01]; + int mlo_mem_size; + bool init_done; +}; + +struct ath12k_hw_link { + u8 device_id; + u8 pdev_idx; +}; + +/* Holds info on the group of devices that are registered as a single + * wiphy, protected with struct ath12k_hw_group::mutex. + */ +struct ath12k_hw_group { + struct list_head list; + u8 id; + u8 num_devices; + u8 num_probed; + u8 num_started; + unsigned long flags; + struct ath12k_base *ab[ATH12K_MAX_SOCS]; + + /* protects access to this struct */ + struct mutex mutex; + + /* Holds information of wiphy (hw) registration. + * + * In Multi/Single Link Operation case, all pdevs are registered as + * a single wiphy. 
In other (legacy/Non-MLO) cases, each pdev is
+ registered as a separate wiphy.
+ */
+ struct ath12k_hw *ah[ATH12K_GROUP_MAX_RADIO];
+ u8 num_hw;
+ bool mlo_capable;
+ struct device_node *wsi_node[ATH12K_MAX_SOCS];
+ struct ath12k_mlo_memory mlo_mem;
+ struct ath12k_hw_link hw_links[ATH12K_GROUP_MAX_RADIO];
+ bool hw_link_id_init_done;
+};
+
+/* Holds WSI info specific to each device, excluding WSI group info */
+struct ath12k_wsi_info {
+ u32 index;
+ u32 hw_link_id_base;
+};
+
/* Master structure to hold the hw data which may be used in core module */
struct ath12k_base {
enum ath12k_hw_rev hw_rev;
@@ -661,10 +878,15 @@ struct ath12k_base {
struct ath12k_qmi qmi;
struct ath12k_wmi_base wmi_ab;
struct completion fw_ready;
+ u8 device_id;
int num_radios;
/* HW channel counters frequency value in hertz common to all MACs */
u32 cc_freq_hz;
+ struct ath12k_dump_file_data *dump_data;
+ size_t ath12k_coredump_len;
+ struct work_struct dump_work;
+
struct ath12k_htc htc;
struct ath12k_dp dp;
@@ -677,6 +899,11 @@ struct ath12k_base {
const struct ath12k_hif_ops *ops;
} hif;
+ struct {
+ struct completion wakeup_completed;
+ u32 wmi_conf_rx_decap_mode;
+ } wow;
+
struct ath12k_ce ce;
struct timer_list rx_replenish_retry;
struct ath12k_hal hal;
@@ -705,6 +932,7 @@ struct ath12k_base {
u8 fw_pdev_count;
struct ath12k_pdev __rcu *pdevs_active[MAX_RADIOS];
+ struct ath12k_wmi_hal_reg_capabilities_ext_arg hal_reg_cap[MAX_RADIOS];
unsigned long long free_vdev_map;
unsigned long long free_vdev_stats_id_map;
@@ -719,7 +947,6 @@ struct ath12k_base {
struct ath12k_wmi_target_cap_arg target_caps;
u32 ext_service_bitmap[WMI_SERVICE_EXT_BM_SIZE];
bool pdevs_macaddr_valid;
- int bd_api;
const struct ath12k_hw_params *hw_params;
@@ -738,6 +965,9 @@ struct ath12k_base {
/* Current DFS Regulatory */
enum ath12k_dfs_region dfs_region;
struct ath12k_soc_dp_stats soc_stats;
+#ifdef CONFIG_ATH12K_DEBUGFS
+ struct dentry *debugfs_soc;
+#endif
unsigned long dev_flags;
struct completion driver_recovery;
@@ -747,11 +977,8 @@ struct ath12k_base {
struct work_struct reset_work;
atomic_t reset_count;
atomic_t recovery_count;
- atomic_t recovery_start_count;
bool is_reset;
struct completion reset_complete;
- struct completion reconfigure_complete;
- struct completion recovery_start;
/* continuous recovery fail count */
atomic_t fail_cont_count;
unsigned long reset_fail_timeout;
@@ -771,10 +998,70 @@ struct ath12k_base {
u64 fw_soc_drop_count;
bool static_window_map;
+ struct work_struct rfkill_work;
+ /* true means radio is on */
+ bool rfkill_radio_on;
+
+ struct {
+ enum ath12k_bdf_search bdf_search;
+ u32 vendor;
+ u32 device;
+ u32 subsystem_vendor;
+ u32 subsystem_device;
+ } id;
+
+ struct {
+ u32 api_version;
+
+ const struct firmware *fw;
+ const u8 *amss_data;
+ size_t amss_len;
+ const u8 *amss_dualmac_data;
+ size_t amss_dualmac_len;
+ const u8 *m3_data;
+ size_t m3_len;
+
+ DECLARE_BITMAP(fw_features, ATH12K_FW_FEATURE_COUNT);
+ } fw;
+
+ const struct hal_rx_ops *hal_rx_ops;
+
+ /* Denotes whether MLO is possible within the chip */
+ bool single_chip_mlo_supp;
+
+ struct completion restart_completed;
+
+#ifdef CONFIG_ACPI
+
+ struct {
+ bool started;
+ u32 func_bit;
+ bool acpi_tas_enable;
+ bool acpi_bios_sar_enable;
+ u8 tas_cfg[ATH12K_ACPI_DSM_TAS_CFG_SIZE];
+ u8 tas_sar_power_table[ATH12K_ACPI_DSM_TAS_DATA_SIZE];
+ u8 bios_sar_data[ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE];
+ u8 geo_offset_data[ATH12K_ACPI_DSM_GEO_OFFSET_DATA_SIZE];
+ u8 cca_data[ATH12K_ACPI_DSM_CCA_DATA_SIZE];
+ u8
band_edge_power[ATH12K_ACPI_DSM_BAND_EDGE_DATA_SIZE]; + } acpi; + +#endif /* CONFIG_ACPI */ + + struct notifier_block panic_nb; + + struct ath12k_hw_group *ag; + struct ath12k_wsi_info wsi_info; + /* must be last */ u8 drv_priv[] __aligned(sizeof(void *)); }; +struct ath12k_pdev_map { + struct ath12k_base *ab; + u8 pdev_idx; +}; + int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab); int ath12k_core_pre_init(struct ath12k_base *ab); int ath12k_core_init(struct ath12k_base *ath12k); @@ -788,14 +1075,23 @@ int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab, int ath12k_core_fetch_bdf(struct ath12k_base *ath12k, struct ath12k_board_data *bd); void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd); +int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd); int ath12k_core_check_dt(struct ath12k_base *ath12k); - +int ath12k_core_check_smbios(struct ath12k_base *ab); void ath12k_core_halt(struct ath12k *ar); +int ath12k_core_resume_early(struct ath12k_base *ab); int ath12k_core_resume(struct ath12k_base *ab); int ath12k_core_suspend(struct ath12k_base *ab); +int ath12k_core_suspend_late(struct ath12k_base *ab); +void ath12k_core_hw_group_unassign(struct ath12k_base *ab); const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab, const char *filename); +u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab); +u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab); +u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab); + +void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag); static inline const char *ath12k_scan_state_str(enum ath12k_scan_state state) { @@ -826,11 +1122,26 @@ static inline struct ath12k_skb_rxcb *ATH12K_SKB_RXCB(struct sk_buff *skb) return (struct ath12k_skb_rxcb *)skb->cb; } -static inline struct ath12k_vif *ath12k_vif_to_arvif(struct ieee80211_vif *vif) +static inline struct ath12k_vif *ath12k_vif_to_ahvif(struct ieee80211_vif *vif) { return (struct ath12k_vif *)vif->drv_priv; } +static inline struct ath12k_sta *ath12k_sta_to_ahsta(struct ieee80211_sta *sta) +{ + return (struct ath12k_sta *)sta->drv_priv; +} + +static inline struct ieee80211_sta *ath12k_ahsta_to_sta(struct ath12k_sta *ahsta) +{ + return container_of((void *)ahsta, struct ieee80211_sta, drv_priv); +} + +static inline struct ieee80211_vif *ath12k_ahvif_to_vif(struct ath12k_vif *ahvif) +{ + return container_of((void *)ahvif, struct ieee80211_vif, drv_priv); +} + static inline struct ath12k *ath12k_ab_to_ar(struct ath12k_base *ab, int mac_id) { @@ -855,4 +1166,68 @@ static inline const char *ath12k_bus_str(enum ath12k_bus bus) return "unknown"; } +static inline struct ath12k_hw *ath12k_hw_to_ah(struct ieee80211_hw *hw) +{ + return hw->priv; +} + +static inline struct ath12k *ath12k_ah_to_ar(struct ath12k_hw *ah, u8 hw_link_id) +{ + if (WARN(hw_link_id >= ah->num_radio, + "bad hw link id %d, so switch to default link\n", hw_link_id)) + hw_link_id = 0; + + return &ah->radio[hw_link_id]; +} + +static inline struct ath12k_hw *ath12k_ar_to_ah(struct ath12k *ar) +{ + return ar->ah; +} + +static inline struct ieee80211_hw *ath12k_ar_to_hw(struct ath12k *ar) +{ + return ar->ah->hw; +} + +#define for_each_ar(ah, ar, index) \ + for ((index) = 0; ((index) < (ah)->num_radio && \ + ((ar) = &(ah)->radio[(index)])); (index)++) + +static inline struct ath12k_hw *ath12k_ag_to_ah(struct ath12k_hw_group *ag, int idx) +{ + return ag->ah[idx]; +} + +static inline void ath12k_ag_set_ah(struct ath12k_hw_group *ag, 
int idx, + struct ath12k_hw *ah) +{ + ag->ah[idx] = ah; +} + +static inline struct ath12k_hw_group *ath12k_ab_to_ag(struct ath12k_base *ab) +{ + return ab->ag; +} + +static inline void ath12k_core_started(struct ath12k_base *ab) +{ + lockdep_assert_held(&ab->ag->mutex); + + ab->ag->num_started++; +} + +static inline void ath12k_core_stopped(struct ath12k_base *ab) +{ + lockdep_assert_held(&ab->ag->mutex); + + ab->ag->num_started--; +} + +static inline struct ath12k_base *ath12k_ag_to_ab(struct ath12k_hw_group *ag, + u8 device_id) +{ + return ag->ab[device_id]; +} + #endif /* _CORE_H_ */ diff --git a/coredump.c b/coredump.c new file mode 100644 index 000000000000..ce1beeb54836 --- /dev/null +++ b/coredump.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#include <linux/devcoredump.h> +#include "hif.h" +#include "coredump.h" +#include "debug.h" + +enum +ath12k_fw_crash_dump_type ath12k_coredump_get_dump_type(enum ath12k_qmi_target_mem type) +{ + enum ath12k_fw_crash_dump_type dump_type; + + switch (type) { + case HOST_DDR_REGION_TYPE: + dump_type = FW_CRASH_DUMP_REMOTE_MEM_DATA; + break; + case M3_DUMP_REGION_TYPE: + dump_type = FW_CRASH_DUMP_M3_DUMP; + break; + case PAGEABLE_MEM_REGION_TYPE: + dump_type = FW_CRASH_DUMP_PAGEABLE_DATA; + break; + case BDF_MEM_REGION_TYPE: + case CALDB_MEM_REGION_TYPE: + dump_type = FW_CRASH_DUMP_NONE; + break; + case MLO_GLOBAL_MEM_REGION_TYPE: + dump_type = FW_CRASH_DUMP_MLO_GLOBAL_DATA; + break; + default: + dump_type = FW_CRASH_DUMP_TYPE_MAX; + break; + } + + return dump_type; +} + +void ath12k_coredump_upload(struct work_struct *work) +{ + struct ath12k_base *ab = container_of(work, struct ath12k_base, dump_work); + + ath12k_info(ab, "Uploading coredump\n"); + /* dev_coredumpv() takes ownership of the buffer */ + dev_coredumpv(ab->dev, ab->dump_data, ab->ath12k_coredump_len, GFP_KERNEL); + ab->dump_data = NULL; +} + +void ath12k_coredump_collect(struct ath12k_base *ab) +{ + ath12k_hif_coredump_download(ab); +} diff --git a/coredump.h b/coredump.h new file mode 100644 index 000000000000..13f46a605113 --- /dev/null +++ b/coredump.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
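One detail worth calling out in ath12k_coredump_upload() above: dev_coredumpv() takes ownership of the vmalloc'd buffer and frees it itself, which is why ab->dump_data is set to NULL right after the call rather than being freed by the driver. A minimal hypothetical sketch of that contract (toy helper, not driver code):

#include <linux/devcoredump.h>
#include <linux/vmalloc.h>

/* hypothetical helper demonstrating the devcoredump ownership rule */
static void toy_upload_dump(struct device *dev, size_t len)
{
	void *buf = vmalloc(len);

	if (!buf)
		return;

	/* ... fill buf with crash dump contents ... */

	dev_coredumpv(dev, buf, len, GFP_KERNEL);
	/* no vfree(buf) here: the devcoredump core now owns the buffer */
}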
+ */
+#ifndef _ATH12K_COREDUMP_H_
+#define _ATH12K_COREDUMP_H_
+
+#define ATH12K_FW_CRASH_DUMP_V2 2
+
+enum ath12k_fw_crash_dump_type {
+ FW_CRASH_DUMP_PAGING_DATA,
+ FW_CRASH_DUMP_RDDM_DATA,
+ FW_CRASH_DUMP_REMOTE_MEM_DATA,
+ FW_CRASH_DUMP_PAGEABLE_DATA,
+ FW_CRASH_DUMP_M3_DUMP,
+ FW_CRASH_DUMP_NONE,
+ FW_CRASH_DUMP_MLO_GLOBAL_DATA,
+
+ /* keep last */
+ FW_CRASH_DUMP_TYPE_MAX,
+};
+
+#define COREDUMP_TLV_HDR_SIZE 8
+
+struct ath12k_tlv_dump_data {
+ /* see ath12k_fw_crash_dump_type above */
+ __le32 type;
+
+ /* in bytes */
+ __le32 tlv_len;
+
+ /* pad to 32-bit boundaries as needed */
+ u8 tlv_data[];
+} __packed;
+
+struct ath12k_dump_file_data {
+ /* "ATH12K-FW-DUMP" */
+ char df_magic[16];
+ /* total dump len in bytes */
+ __le32 len;
+ /* file dump version */
+ __le32 version;
+ /* pci device id */
+ __le32 chip_id;
+ /* qrtr instance id */
+ __le32 qrtr_id;
+ /* pci domain id */
+ __le32 bus_id;
+ guid_t guid;
+ /* time-of-day stamp */
+ __le64 tv_sec;
+ /* time-of-day stamp, nano-seconds */
+ __le64 tv_nsec;
+ /* room for growth w/out changing binary format */
+ u8 unused[128];
+ u8 data[];
+} __packed;
+
+#ifdef CONFIG_ATH12K_COREDUMP
+enum ath12k_fw_crash_dump_type ath12k_coredump_get_dump_type
+ (enum ath12k_qmi_target_mem type);
+void ath12k_coredump_upload(struct work_struct *work);
+void ath12k_coredump_collect(struct ath12k_base *ab);
+#else
+static inline enum ath12k_fw_crash_dump_type ath12k_coredump_get_dump_type
+ (enum ath12k_qmi_target_mem type)
+{
+ return FW_CRASH_DUMP_TYPE_MAX;
+}
+
+static inline void ath12k_coredump_upload(struct work_struct *work)
+{
+}
+
+static inline void ath12k_coredump_collect(struct ath12k_base *ab)
+{
+}
+#endif
+
+#endif
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
@@ -36,7 +36,7 @@ void ath12k_err(struct ath12k_base *ab, const char *fmt, ...)
va_end(args);
}
-void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...)
+void __ath12k_warn(struct device *dev, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
@@ -45,7 +45,7 @@ void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...)
va_start(args, fmt);
vaf.va = &args;
- dev_warn_ratelimited(ab->dev, "%pV", &vaf);
+ dev_warn_ratelimited(dev, "%pV", &vaf);
/* TODO: Trace the log */
va_end(args);
}
@@ -64,7 +64,7 @@ void __ath12k_dbg(struct ath12k_base *ab, enum ath12k_debug_mask mask,
vaf.va = &args;
if (ath12k_debug_mask & mask)
- dev_dbg(ab->dev, "%pV", &vaf);
+ dev_printk(KERN_DEBUG, ab->dev, "%pV", &vaf);
/* TODO: trace log */
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
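The on-disk layout declared above is a fixed ath12k_dump_file_data header followed by a run of ath12k_tlv_dump_data records. A hedged userspace-side sketch of walking those TLVs, assuming a little-endian host (so the __le32 fields can be read directly) and assuming each payload is padded to a 32-bit boundary as the struct comment suggests; the 8-byte header in main() is a stand-in, not the real header size:

#include <stdint.h>
#include <stdio.h>

/* userspace mirror of struct ath12k_tlv_dump_data */
struct toy_tlv {
	uint32_t type;    /* one of the FW_CRASH_DUMP_* values */
	uint32_t tlv_len; /* payload bytes that follow */
};

static void toy_walk_tlvs(const uint8_t *file, size_t file_len, size_t hdr_len)
{
	size_t off = hdr_len; /* skip the dump file header */

	while (off + sizeof(struct toy_tlv) <= file_len) {
		const struct toy_tlv *tlv = (const void *)(file + off);
		uint32_t padded = (tlv->tlv_len + 3u) & ~3u; /* 32-bit padding */

		printf("tlv type %u, %u payload bytes\n", tlv->type, tlv->tlv_len);
		off += sizeof(*tlv) + padded;
	}
}

int main(void)
{
	/* fake file: 8-byte placeholder header, then one TLV with 4 bytes */
	uint8_t file[8 + 8 + 4] = { 0 };
	struct toy_tlv *t = (void *)(file + 8);

	t->type = 2; /* e.g. FW_CRASH_DUMP_REMOTE_MEM_DATA */
	t->tlv_len = 4;

	toy_walk_tlvs(file, sizeof(file), 8);
	return 0;
}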
*/ #ifndef _ATH12K_DEBUG_H_ @@ -25,12 +25,16 @@ enum ath12k_debug_mask { ATH12K_DBG_PCI = 0x00001000, ATH12K_DBG_DP_TX = 0x00002000, ATH12K_DBG_DP_RX = 0x00004000, + ATH12K_DBG_WOW = 0x00008000, ATH12K_DBG_ANY = 0xffffffff, }; __printf(2, 3) void ath12k_info(struct ath12k_base *ab, const char *fmt, ...); __printf(2, 3) void ath12k_err(struct ath12k_base *ab, const char *fmt, ...); -__printf(2, 3) void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...); +__printf(2, 3) void __ath12k_warn(struct device *dev, const char *fmt, ...); + +#define ath12k_warn(ab, fmt, ...) __ath12k_warn((ab)->dev, fmt, ##__VA_ARGS__) +#define ath12k_hw_warn(ah, fmt, ...) __ath12k_warn((ah)->dev, fmt, ##__VA_ARGS__) extern unsigned int ath12k_debug_mask; diff --git a/debugfs.c b/debugfs.c new file mode 100644 index 000000000000..d4b32d1a431c --- /dev/null +++ b/debugfs.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "core.h" +#include "debugfs.h" +#include "debugfs_htt_stats.h" + +static ssize_t ath12k_write_simulate_radar(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath12k *ar = file->private_data; + int ret; + + wiphy_lock(ath12k_ar_to_hw(ar)->wiphy); + ret = ath12k_wmi_simulate_radar(ar); + if (ret) + goto exit; + + ret = count; +exit: + wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); + return ret; +} + +static const struct file_operations fops_simulate_radar = { + .write = ath12k_write_simulate_radar, + .open = simple_open +}; + +void ath12k_debugfs_soc_create(struct ath12k_base *ab) +{ + bool dput_needed; + char soc_name[64] = { 0 }; + struct dentry *debugfs_ath12k; + + debugfs_ath12k = debugfs_lookup("ath12k", NULL); + if (debugfs_ath12k) { + /* a dentry from lookup() needs dput() after we don't use it */ + dput_needed = true; + } else { + debugfs_ath12k = debugfs_create_dir("ath12k", NULL); + if (IS_ERR_OR_NULL(debugfs_ath12k)) + return; + dput_needed = false; + } + + scnprintf(soc_name, sizeof(soc_name), "%s-%s", ath12k_bus_str(ab->hif.bus), + dev_name(ab->dev)); + + ab->debugfs_soc = debugfs_create_dir(soc_name, debugfs_ath12k); + + if (dput_needed) + dput(debugfs_ath12k); +} + +void ath12k_debugfs_soc_destroy(struct ath12k_base *ab) +{ + debugfs_remove_recursive(ab->debugfs_soc); + ab->debugfs_soc = NULL; + /* We are not removing ath12k directory on purpose, even if it + * would be empty. This simplifies the directory handling and it's + * a minor cosmetic issue to leave an empty ath12k directory to + * debugfs. 
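The ath12k_debug_mask test in __ath12k_dbg() (see the debug.c hunk above) is the usual module-parameter-gated print pattern: each ATH12K_DBG_* value claims one bit, and a message is emitted only when its bit is set in the global mask. A self-contained sketch with toy names (illustrative only):

#include <stdarg.h>
#include <stdio.h>

enum toy_dbg_mask {
	TOY_DBG_BOOT = 0x00000001,
	TOY_DBG_WOW  = 0x00008000, /* same bit position as ATH12K_DBG_WOW */
};

static unsigned int toy_debug_mask = TOY_DBG_BOOT; /* a module param in the driver */

__attribute__((format(printf, 2, 3)))
static void toy_dbg(unsigned int mask, const char *fmt, ...)
{
	va_list args;

	if (!(toy_debug_mask & mask))
		return;

	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}

int main(void)
{
	toy_dbg(TOY_DBG_BOOT, "boot message %d\n", 1); /* printed */
	toy_dbg(TOY_DBG_WOW, "wow message\n");         /* filtered out */
	return 0;
}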
+ */ +} + +void ath12k_debugfs_register(struct ath12k *ar) +{ + struct ath12k_base *ab = ar->ab; + struct ieee80211_hw *hw = ar->ah->hw; + char pdev_name[5]; + char buf[100] = {0}; + + scnprintf(pdev_name, sizeof(pdev_name), "%s%d", "mac", ar->pdev_idx); + + ar->debug.debugfs_pdev = debugfs_create_dir(pdev_name, ab->debugfs_soc); + + /* Create a symlink under ieee80211/phy* */ + scnprintf(buf, sizeof(buf), "../../ath12k/%pd2", ar->debug.debugfs_pdev); + ar->debug.debugfs_pdev_symlink = debugfs_create_symlink("ath12k", + hw->wiphy->debugfsdir, + buf); + + if (ar->mac.sbands[NL80211_BAND_5GHZ].channels) { + debugfs_create_file("dfs_simulate_radar", 0200, + ar->debug.debugfs_pdev, ar, + &fops_simulate_radar); + } + + ath12k_debugfs_htt_stats_register(ar); +} + +void ath12k_debugfs_unregister(struct ath12k *ar) +{ + if (!ar->debug.debugfs_pdev) + return; + + /* Remove symlink under ieee80211/phy* */ + debugfs_remove(ar->debug.debugfs_pdev_symlink); + debugfs_remove_recursive(ar->debug.debugfs_pdev); + ar->debug.debugfs_pdev_symlink = NULL; + ar->debug.debugfs_pdev = NULL; +} diff --git a/debugfs.h b/debugfs.h new file mode 100644 index 000000000000..8d64ba03aa9a --- /dev/null +++ b/debugfs.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _ATH12K_DEBUGFS_H_ +#define _ATH12K_DEBUGFS_H_ + +#ifdef CONFIG_ATH12K_DEBUGFS +void ath12k_debugfs_soc_create(struct ath12k_base *ab); +void ath12k_debugfs_soc_destroy(struct ath12k_base *ab); +void ath12k_debugfs_register(struct ath12k *ar); +void ath12k_debugfs_unregister(struct ath12k *ar); +#else +static inline void ath12k_debugfs_soc_create(struct ath12k_base *ab) +{ +} + +static inline void ath12k_debugfs_soc_destroy(struct ath12k_base *ab) +{ +} + +static inline void ath12k_debugfs_register(struct ath12k *ar) +{ +} + +static inline void ath12k_debugfs_unregister(struct ath12k *ar) +{ +} + +#endif /* CONFIG_ATH12K_DEBUGFS */ + +#endif /* _ATH12K_DEBUGFS_H_ */ diff --git a/debugfs_htt_stats.c b/debugfs_htt_stats.c new file mode 100644 index 000000000000..41e4ef2ef3af --- /dev/null +++ b/debugfs_htt_stats.c @@ -0,0 +1,4389 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
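The symlink target built in ath12k_debugfs_register() above leans on "%pd2", the kernel vsprintf extension that prints the last two path components of a dentry; the "../.." prefix climbs out of ieee80211/phy*/ before descending back into the ath12k tree. A toy userspace reconstruction with hypothetical directory names:

#include <stdio.h>

int main(void)
{
	/* what "%pd2" would yield for the pdev dentry: <soc dir>/<pdev dir>;
	 * both names below are hypothetical examples
	 */
	const char *last_two = "pci-0000:06:00.0/mac0";
	char buf[100];

	snprintf(buf, sizeof(buf), "../../ath12k/%s", last_two);
	printf("%s\n", buf); /* ../../ath12k/pci-0000:06:00.0/mac0 */
	return 0;
}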
+ */ + +#include <linux/vmalloc.h> +#include "core.h" +#include "debug.h" +#include "debugfs_htt_stats.h" +#include "dp_tx.h" +#include "dp_rx.h" + +static u32 +print_array_to_buf_index(u8 *buf, u32 offset, const char *header, u32 stats_index, + const __le32 *array, u32 array_len, const char *footer) +{ + int index = 0; + u8 i; + + if (header) { + index += scnprintf(buf + offset, + ATH12K_HTT_STATS_BUF_SIZE - offset, + "%s = ", header); + } + for (i = 0; i < array_len; i++) { + index += scnprintf(buf + offset + index, + (ATH12K_HTT_STATS_BUF_SIZE - offset) - index, + " %u:%u,", stats_index++, le32_to_cpu(array[i])); + } + /* To overwrite the last trailing comma */ + index--; + *(buf + offset + index) = '\0'; + + if (footer) { + index += scnprintf(buf + offset + index, + (ATH12K_HTT_STATS_BUF_SIZE - offset) - index, + "%s", footer); + } + return index; +} + +static u32 +print_array_to_buf(u8 *buf, u32 offset, const char *header, + const __le32 *array, u32 array_len, const char *footer) +{ + return print_array_to_buf_index(buf, offset, header, 0, array, array_len, + footer); +} + +static const char *ath12k_htt_ax_tx_rx_ru_size_to_str(u8 ru_size) +{ + switch (ru_size) { + case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26: + return "26"; + case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52: + return "52"; + case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106: + return "106"; + case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242: + return "242"; + case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484: + return "484"; + case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996: + return "996"; + case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2: + return "996x2"; + default: + return "unknown"; + } +} + +static const char *ath12k_htt_be_tx_rx_ru_size_to_str(u8 ru_size) +{ + switch (ru_size) { + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_26: + return "26"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_52: + return "52"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_52_26: + return "52+26"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_106: + return "106"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_106_26: + return "106+26"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_242: + return "242"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_484: + return "484"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_484_242: + return "484+242"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996: + return "996"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996_484: + return "996+484"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996_484_242: + return "996+484+242"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x2: + return "996x2"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x2_484: + return "996x2+484"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x3: + return "996x3"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x3_484: + return "996x3+484"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x4: + return "996x4"; + default: + return "unknown"; + } +} + +static const char* +ath12k_tx_ru_size_to_str(enum ath12k_htt_stats_ru_type ru_type, u8 ru_size) +{ + if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY) + return ath12k_htt_ax_tx_rx_ru_size_to_str(ru_size); + else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU) + return ath12k_htt_be_tx_rx_ru_size_to_str(ru_size); + else + return "unknown"; +} + +static void +htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_stats_cmn_tlv *htt_stats_buf = tag_buf; + u8 *buf = 
stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_CMN_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "comp_delivered = %u\n", + le32_to_cpu(htt_stats_buf->comp_delivered)); + len += scnprintf(buf + len, buf_len - len, "self_triggers = %u\n", + le32_to_cpu(htt_stats_buf->self_triggers)); + len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n", + le32_to_cpu(htt_stats_buf->hw_queued)); + len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n", + le32_to_cpu(htt_stats_buf->hw_reaped)); + len += scnprintf(buf + len, buf_len - len, "underrun = %u\n", + le32_to_cpu(htt_stats_buf->underrun)); + len += scnprintf(buf + len, buf_len - len, "hw_paused = %u\n", + le32_to_cpu(htt_stats_buf->hw_paused)); + len += scnprintf(buf + len, buf_len - len, "hw_flush = %u\n", + le32_to_cpu(htt_stats_buf->hw_flush)); + len += scnprintf(buf + len, buf_len - len, "hw_filt = %u\n", + le32_to_cpu(htt_stats_buf->hw_filt)); + len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n", + le32_to_cpu(htt_stats_buf->tx_abort)); + len += scnprintf(buf + len, buf_len - len, "ppdu_ok = %u\n", + le32_to_cpu(htt_stats_buf->ppdu_ok)); + len += scnprintf(buf + len, buf_len - len, "mpdu_requeued = %u\n", + le32_to_cpu(htt_stats_buf->mpdu_requed)); + len += scnprintf(buf + len, buf_len - len, "tx_xretry = %u\n", + le32_to_cpu(htt_stats_buf->tx_xretry)); + len += scnprintf(buf + len, buf_len - len, "data_rc = %u\n", + le32_to_cpu(htt_stats_buf->data_rc)); + len += scnprintf(buf + len, buf_len - len, "mpdu_dropped_xretry = %u\n", + le32_to_cpu(htt_stats_buf->mpdu_dropped_xretry)); + len += scnprintf(buf + len, buf_len - len, "illegal_rate_phy_err = %u\n", + le32_to_cpu(htt_stats_buf->illgl_rate_phy_err)); + len += scnprintf(buf + len, buf_len - len, "cont_xretry = %u\n", + le32_to_cpu(htt_stats_buf->cont_xretry)); + len += scnprintf(buf + len, buf_len - len, "tx_timeout = %u\n", + le32_to_cpu(htt_stats_buf->tx_timeout)); + len += scnprintf(buf + len, buf_len - len, "tx_time_dur_data = %u\n", + le32_to_cpu(htt_stats_buf->tx_time_dur_data)); + len += scnprintf(buf + len, buf_len - len, "pdev_resets = %u\n", + le32_to_cpu(htt_stats_buf->pdev_resets)); + len += scnprintf(buf + len, buf_len - len, "phy_underrun = %u\n", + le32_to_cpu(htt_stats_buf->phy_underrun)); + len += scnprintf(buf + len, buf_len - len, "txop_ovf = %u\n", + le32_to_cpu(htt_stats_buf->txop_ovf)); + len += scnprintf(buf + len, buf_len - len, "seq_posted = %u\n", + le32_to_cpu(htt_stats_buf->seq_posted)); + len += scnprintf(buf + len, buf_len - len, "seq_failed_queueing = %u\n", + le32_to_cpu(htt_stats_buf->seq_failed_queueing)); + len += scnprintf(buf + len, buf_len - len, "seq_completed = %u\n", + le32_to_cpu(htt_stats_buf->seq_completed)); + len += scnprintf(buf + len, buf_len - len, "seq_restarted = %u\n", + le32_to_cpu(htt_stats_buf->seq_restarted)); + len += scnprintf(buf + len, buf_len - len, "seq_txop_repost_stop = %u\n", + le32_to_cpu(htt_stats_buf->seq_txop_repost_stop)); + len += scnprintf(buf + len, buf_len - len, "next_seq_cancel = %u\n", + le32_to_cpu(htt_stats_buf->next_seq_cancel)); + len += scnprintf(buf + len, buf_len - len, "dl_mu_mimo_seq_posted = %u\n", + 
le32_to_cpu(htt_stats_buf->mu_seq_posted)); + len += scnprintf(buf + len, buf_len - len, "dl_mu_ofdma_seq_posted = %u\n", + le32_to_cpu(htt_stats_buf->mu_ofdma_seq_posted)); + len += scnprintf(buf + len, buf_len - len, "ul_mu_mimo_seq_posted = %u\n", + le32_to_cpu(htt_stats_buf->ul_mumimo_seq_posted)); + len += scnprintf(buf + len, buf_len - len, "ul_mu_ofdma_seq_posted = %u\n", + le32_to_cpu(htt_stats_buf->ul_ofdma_seq_posted)); + len += scnprintf(buf + len, buf_len - len, "mu_mimo_peer_blacklisted = %u\n", + le32_to_cpu(htt_stats_buf->num_mu_peer_blacklisted)); + len += scnprintf(buf + len, buf_len - len, "seq_qdepth_repost_stop = %u\n", + le32_to_cpu(htt_stats_buf->seq_qdepth_repost_stop)); + len += scnprintf(buf + len, buf_len - len, "seq_min_msdu_repost_stop = %u\n", + le32_to_cpu(htt_stats_buf->seq_min_msdu_repost_stop)); + len += scnprintf(buf + len, buf_len - len, "mu_seq_min_msdu_repost_stop = %u\n", + le32_to_cpu(htt_stats_buf->mu_seq_min_msdu_repost_stop)); + len += scnprintf(buf + len, buf_len - len, "seq_switch_hw_paused = %u\n", + le32_to_cpu(htt_stats_buf->seq_switch_hw_paused)); + len += scnprintf(buf + len, buf_len - len, "next_seq_posted_dsr = %u\n", + le32_to_cpu(htt_stats_buf->next_seq_posted_dsr)); + len += scnprintf(buf + len, buf_len - len, "seq_posted_isr = %u\n", + le32_to_cpu(htt_stats_buf->seq_posted_isr)); + len += scnprintf(buf + len, buf_len - len, "seq_ctrl_cached = %u\n", + le32_to_cpu(htt_stats_buf->seq_ctrl_cached)); + len += scnprintf(buf + len, buf_len - len, "mpdu_count_tqm = %u\n", + le32_to_cpu(htt_stats_buf->mpdu_count_tqm)); + len += scnprintf(buf + len, buf_len - len, "msdu_count_tqm = %u\n", + le32_to_cpu(htt_stats_buf->msdu_count_tqm)); + len += scnprintf(buf + len, buf_len - len, "mpdu_removed_tqm = %u\n", + le32_to_cpu(htt_stats_buf->mpdu_removed_tqm)); + len += scnprintf(buf + len, buf_len - len, "msdu_removed_tqm = %u\n", + le32_to_cpu(htt_stats_buf->msdu_removed_tqm)); + len += scnprintf(buf + len, buf_len - len, "remove_mpdus_max_retries = %u\n", + le32_to_cpu(htt_stats_buf->remove_mpdus_max_retries)); + len += scnprintf(buf + len, buf_len - len, "mpdus_sw_flush = %u\n", + le32_to_cpu(htt_stats_buf->mpdus_sw_flush)); + len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n", + le32_to_cpu(htt_stats_buf->mpdus_hw_filter)); + len += scnprintf(buf + len, buf_len - len, "mpdus_truncated = %u\n", + le32_to_cpu(htt_stats_buf->mpdus_truncated)); + len += scnprintf(buf + len, buf_len - len, "mpdus_ack_failed = %u\n", + le32_to_cpu(htt_stats_buf->mpdus_ack_failed)); + len += scnprintf(buf + len, buf_len - len, "mpdus_expired = %u\n", + le32_to_cpu(htt_stats_buf->mpdus_expired)); + len += scnprintf(buf + len, buf_len - len, "mpdus_seq_hw_retry = %u\n", + le32_to_cpu(htt_stats_buf->mpdus_seq_hw_retry)); + len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n", + le32_to_cpu(htt_stats_buf->ack_tlv_proc)); + len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt_valid = %u\n", + le32_to_cpu(htt_stats_buf->coex_abort_mpdu_cnt_valid)); + len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt = %u\n", + le32_to_cpu(htt_stats_buf->coex_abort_mpdu_cnt)); + len += scnprintf(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u\n", + le32_to_cpu(htt_stats_buf->num_total_ppdus_tried_ota)); + len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u\n", + le32_to_cpu(htt_stats_buf->num_data_ppdus_tried_ota)); + len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_enqued = %u\n", + 
le32_to_cpu(htt_stats_buf->local_ctrl_mgmt_enqued)); + len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_freed = %u\n", + le32_to_cpu(htt_stats_buf->local_ctrl_mgmt_freed)); + len += scnprintf(buf + len, buf_len - len, "local_data_enqued = %u\n", + le32_to_cpu(htt_stats_buf->local_data_enqued)); + len += scnprintf(buf + len, buf_len - len, "local_data_freed = %u\n", + le32_to_cpu(htt_stats_buf->local_data_freed)); + len += scnprintf(buf + len, buf_len - len, "mpdu_tried = %u\n", + le32_to_cpu(htt_stats_buf->mpdu_tried)); + len += scnprintf(buf + len, buf_len - len, "isr_wait_seq_posted = %u\n", + le32_to_cpu(htt_stats_buf->isr_wait_seq_posted)); + len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_low = %u\n", + le32_to_cpu(htt_stats_buf->tx_active_dur_us_low)); + len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_high = %u\n", + le32_to_cpu(htt_stats_buf->tx_active_dur_us_high)); + len += scnprintf(buf + len, buf_len - len, "fes_offsets_err_cnt = %u\n\n", + le32_to_cpu(htt_stats_buf->fes_offsets_err_cnt)); + + stats_req->buf_len = len; +} + +static void +htt_print_tx_pdev_stats_urrn_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_stats_urrn_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elems = min_t(u16, (tag_len >> 2), + HTT_TX_PDEV_MAX_URRN_STATS); + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_STATS_URRN_TLV:\n"); + + len += print_array_to_buf(buf, len, "urrn_stats", htt_stats_buf->urrn_stats, + num_elems, "\n\n"); + + stats_req->buf_len = len; +} + +static void +htt_print_tx_pdev_stats_flush_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_stats_flush_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elems = min_t(u16, (tag_len >> 2), + ATH12K_HTT_TX_PDEV_MAX_FLUSH_REASON_STATS); + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_STATS_FLUSH_TLV:\n"); + + len += print_array_to_buf(buf, len, "flush_errs", htt_stats_buf->flush_errs, + num_elems, "\n\n"); + + stats_req->buf_len = len; +} + +static void +htt_print_tx_pdev_stats_sifs_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_stats_sifs_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elems = min_t(u16, (tag_len >> 2), + ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_STATS); + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_STATS_SIFS_TLV:\n"); + + len += print_array_to_buf(buf, len, "sifs_status", htt_stats_buf->sifs_status, + num_elems, "\n\n"); + + stats_req->buf_len = len; +} + +static void +htt_print_tx_pdev_mu_ppdu_dist_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_mu_ppdu_dist_stats_tlv *htt_stats_buf = tag_buf; + char *mode; + u8 j, hw_mode, i, str_buf_len; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 stats_value; + u8 max_ppdu = ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST; + u8 max_sched = ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS; + char str_buf[ATH12K_HTT_MAX_STRING_LEN]; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + hw_mode = 
le32_to_cpu(htt_stats_buf->hw_mode);
+
+ switch (hw_mode) {
+ case ATH12K_HTT_STATS_HWMODE_AC:
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_AC_MU_PPDU_DISTRIBUTION_STATS:\n");
+ mode = "ac";
+ break;
+ case ATH12K_HTT_STATS_HWMODE_AX:
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_AX_MU_PPDU_DISTRIBUTION_STATS:\n");
+ mode = "ax";
+ break;
+ case ATH12K_HTT_STATS_HWMODE_BE:
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_BE_MU_PPDU_DISTRIBUTION_STATS:\n");
+ mode = "be";
+ break;
+ default:
+ return;
+ }
+
+ for (i = 0; i < ATH12K_HTT_STATS_NUM_NR_BINS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_mu_mimo_num_seq_posted_nr%u = %u\n", mode,
+ ((i + 1) * 4), htt_stats_buf->num_seq_posted[i]);
+ str_buf_len = 0;
+ memset(str_buf, 0x0, sizeof(str_buf));
+ for (j = 0; j < ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST; j++) {
+ stats_value = le32_to_cpu(htt_stats_buf->num_ppdu_posted_per_burst
+ [i * max_ppdu + j]);
+ str_buf_len += scnprintf(&str_buf[str_buf_len],
+ ATH12K_HTT_MAX_STRING_LEN - str_buf_len,
+ " %u:%u,", j, stats_value);
+ }
+ /* To overwrite the last trailing comma */
+ str_buf[str_buf_len - 1] = '\0';
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_mu_mimo_num_ppdu_posted_per_burst_nr%u = %s\n",
+ mode, ((i + 1) * 4), str_buf);
+ str_buf_len = 0;
+ memset(str_buf, 0x0, sizeof(str_buf));
+ for (j = 0; j < ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST; j++) {
+ stats_value = le32_to_cpu(htt_stats_buf->num_ppdu_cmpl_per_burst
+ [i * max_ppdu + j]);
+ str_buf_len += scnprintf(&str_buf[str_buf_len],
+ ATH12K_HTT_MAX_STRING_LEN - str_buf_len,
+ " %u:%u,", j, stats_value);
+ }
+ /* To overwrite the last trailing comma */
+ str_buf[str_buf_len - 1] = '\0';
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_mu_mimo_num_ppdu_completed_per_burst_nr%u = %s\n",
+ mode, ((i + 1) * 4), str_buf);
+ str_buf_len = 0;
+ memset(str_buf, 0x0, sizeof(str_buf));
+ for (j = 0; j < ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS; j++) {
+ stats_value = le32_to_cpu(htt_stats_buf->num_seq_term_status
+ [i * max_sched + j]);
+ str_buf_len += scnprintf(&str_buf[str_buf_len],
+ ATH12K_HTT_MAX_STRING_LEN - str_buf_len,
+ " %u:%u,", j, stats_value);
+ }
+ /* To overwrite the last trailing comma */
+ str_buf[str_buf_len - 1] = '\0';
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_mu_mimo_num_seq_term_status_nr%u = %s\n\n",
+ mode, ((i + 1) * 4), str_buf);
+ }
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_tx_pdev_stats_sifs_hist_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_sifs_hist_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SIFS_HIST_TLV:\n");
+
+ len += print_array_to_buf(buf, len, "sifs_hist_status",
+ htt_stats_buf->sifs_hist_status, num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_pdev_ctrl_path_tx_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_ctrl_path_tx_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
"HTT_TX_PDEV_STATS_CTRL_PATH_TX_STATS:\n"); + len += print_array_to_buf(buf, len, "fw_tx_mgmt_subtype", + htt_stats_buf->fw_tx_mgmt_subtype, + ATH12K_HTT_STATS_SUBTYPE_MAX, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_stats_tx_sched_cmn_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_stats_tx_sched_cmn_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, "HTT_STATS_TX_SCHED_CMN_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "current_timestamp = %u\n\n", + le32_to_cpu(htt_stats_buf->current_timestamp)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_pdev_stats_sched_per_txq_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_stats_sched_per_txq_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, + ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "txq_id = %u\n", + u32_get_bits(mac_id_word, + ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID)); + len += scnprintf(buf + len, buf_len - len, "sched_policy = %u\n", + le32_to_cpu(htt_stats_buf->sched_policy)); + len += scnprintf(buf + len, buf_len - len, + "last_sched_cmd_posted_timestamp = %u\n", + le32_to_cpu(htt_stats_buf->last_sched_cmd_posted_timestamp)); + len += scnprintf(buf + len, buf_len - len, + "last_sched_cmd_compl_timestamp = %u\n", + le32_to_cpu(htt_stats_buf->last_sched_cmd_compl_timestamp)); + len += scnprintf(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u\n", + le32_to_cpu(htt_stats_buf->sched_2_tac_lwm_count)); + len += scnprintf(buf + len, buf_len - len, "sched_2_tac_ring_full = %u\n", + le32_to_cpu(htt_stats_buf->sched_2_tac_ring_full)); + len += scnprintf(buf + len, buf_len - len, "sched_cmd_post_failure = %u\n", + le32_to_cpu(htt_stats_buf->sched_cmd_post_failure)); + len += scnprintf(buf + len, buf_len - len, "num_active_tids = %u\n", + le32_to_cpu(htt_stats_buf->num_active_tids)); + len += scnprintf(buf + len, buf_len - len, "num_ps_schedules = %u\n", + le32_to_cpu(htt_stats_buf->num_ps_schedules)); + len += scnprintf(buf + len, buf_len - len, "sched_cmds_pending = %u\n", + le32_to_cpu(htt_stats_buf->sched_cmds_pending)); + len += scnprintf(buf + len, buf_len - len, "num_tid_register = %u\n", + le32_to_cpu(htt_stats_buf->num_tid_register)); + len += scnprintf(buf + len, buf_len - len, "num_tid_unregister = %u\n", + le32_to_cpu(htt_stats_buf->num_tid_unregister)); + len += scnprintf(buf + len, buf_len - len, "num_qstats_queried = %u\n", + le32_to_cpu(htt_stats_buf->num_qstats_queried)); + len += scnprintf(buf + len, buf_len - len, "qstats_update_pending = %u\n", + le32_to_cpu(htt_stats_buf->qstats_update_pending)); + len += scnprintf(buf 
+ len, buf_len - len, "last_qstats_query_timestamp = %u\n", + le32_to_cpu(htt_stats_buf->last_qstats_query_timestamp)); + len += scnprintf(buf + len, buf_len - len, "num_tqm_cmdq_full = %u\n", + le32_to_cpu(htt_stats_buf->num_tqm_cmdq_full)); + len += scnprintf(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u\n", + le32_to_cpu(htt_stats_buf->num_de_sched_algo_trigger)); + len += scnprintf(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u\n", + le32_to_cpu(htt_stats_buf->num_rt_sched_algo_trigger)); + len += scnprintf(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u\n", + le32_to_cpu(htt_stats_buf->num_tqm_sched_algo_trigger)); + len += scnprintf(buf + len, buf_len - len, "notify_sched = %u\n", + le32_to_cpu(htt_stats_buf->notify_sched)); + len += scnprintf(buf + len, buf_len - len, "dur_based_sendn_term = %u\n", + le32_to_cpu(htt_stats_buf->dur_based_sendn_term)); + len += scnprintf(buf + len, buf_len - len, "su_notify2_sched = %u\n", + le32_to_cpu(htt_stats_buf->su_notify2_sched)); + len += scnprintf(buf + len, buf_len - len, "su_optimal_queued_msdus_sched = %u\n", + le32_to_cpu(htt_stats_buf->su_optimal_queued_msdus_sched)); + len += scnprintf(buf + len, buf_len - len, "su_delay_timeout_sched = %u\n", + le32_to_cpu(htt_stats_buf->su_delay_timeout_sched)); + len += scnprintf(buf + len, buf_len - len, "su_min_txtime_sched_delay = %u\n", + le32_to_cpu(htt_stats_buf->su_min_txtime_sched_delay)); + len += scnprintf(buf + len, buf_len - len, "su_no_delay = %u\n", + le32_to_cpu(htt_stats_buf->su_no_delay)); + len += scnprintf(buf + len, buf_len - len, "num_supercycles = %u\n", + le32_to_cpu(htt_stats_buf->num_supercycles)); + len += scnprintf(buf + len, buf_len - len, "num_subcycles_with_sort = %u\n", + le32_to_cpu(htt_stats_buf->num_subcycles_with_sort)); + len += scnprintf(buf + len, buf_len - len, "num_subcycles_no_sort = %u\n\n", + le32_to_cpu(htt_stats_buf->num_subcycles_no_sort)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_sched_txq_cmd_posted_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_sched_txq_cmd_posted_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elements = tag_len >> 2; + + len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_POSTED_TLV:\n"); + len += print_array_to_buf(buf, len, "sched_cmd_posted", + htt_stats_buf->sched_cmd_posted, num_elements, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_sched_txq_cmd_reaped_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_sched_txq_cmd_reaped_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elements = tag_len >> 2; + + len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_REAPED_TLV:\n"); + len += print_array_to_buf(buf, len, "sched_cmd_reaped", + htt_stats_buf->sched_cmd_reaped, num_elements, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_sched_txq_sched_order_su_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_sched_txq_sched_order_su_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 sched_order_su_num_entries = min_t(u32, (tag_len >> 2), + 
ATH12K_HTT_TX_PDEV_NUM_SCHED_ORDER_LOG); + + len += scnprintf(buf + len, buf_len - len, + "HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV:\n"); + len += print_array_to_buf(buf, len, "sched_order_su", + htt_stats_buf->sched_order_su, + sched_order_su_num_entries, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_sched_txq_sched_ineligibility_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_sched_txq_sched_ineligibility_tlv *htt_stats_buf = + tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 sched_ineligibility_num_entries = tag_len >> 2; + + len += scnprintf(buf + len, buf_len - len, + "HTT_SCHED_TXQ_SCHED_INELIGIBILITY:\n"); + len += print_array_to_buf(buf, len, "sched_ineligibility", + htt_stats_buf->sched_ineligibility, + sched_ineligibility_num_entries, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_sched_txq_supercycle_trigger_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_sched_txq_supercycle_triggers_tlv *htt_stats_buf = + tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elems = min_t(u16, (tag_len >> 2), + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX); + + len += scnprintf(buf + len, buf_len - len, + "HTT_SCHED_TXQ_SUPERCYCLE_TRIGGER:\n"); + len += print_array_to_buf(buf, len, "supercycle_triggers", + htt_stats_buf->supercycle_triggers, num_elems, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_hw_stats_pdev_errs_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_hw_stats_pdev_errs_tlv *htt_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_buf)) + return; + + mac_id_word = le32_to_cpu(htt_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_PDEV_ERRS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n", + le32_to_cpu(htt_buf->tx_abort)); + len += scnprintf(buf + len, buf_len - len, "tx_abort_fail_count = %u\n", + le32_to_cpu(htt_buf->tx_abort_fail_count)); + len += scnprintf(buf + len, buf_len - len, "rx_abort = %u\n", + le32_to_cpu(htt_buf->rx_abort)); + len += scnprintf(buf + len, buf_len - len, "rx_abort_fail_count = %u\n", + le32_to_cpu(htt_buf->rx_abort_fail_count)); + len += scnprintf(buf + len, buf_len - len, "rx_flush_cnt = %u\n", + le32_to_cpu(htt_buf->rx_flush_cnt)); + len += scnprintf(buf + len, buf_len - len, "warm_reset = %u\n", + le32_to_cpu(htt_buf->warm_reset)); + len += scnprintf(buf + len, buf_len - len, "cold_reset = %u\n", + le32_to_cpu(htt_buf->cold_reset)); + len += scnprintf(buf + len, buf_len - len, "mac_cold_reset_restore_cal = %u\n", + le32_to_cpu(htt_buf->mac_cold_reset_restore_cal)); + len += scnprintf(buf + len, buf_len - len, "mac_cold_reset = %u\n", + le32_to_cpu(htt_buf->mac_cold_reset)); + len += scnprintf(buf + len, buf_len - len, "mac_warm_reset = %u\n", + le32_to_cpu(htt_buf->mac_warm_reset)); + len += scnprintf(buf + len, buf_len - len, "mac_only_reset = %u\n", + le32_to_cpu(htt_buf->mac_only_reset)); + len += scnprintf(buf + len, buf_len - len, "phy_warm_reset = %u\n", + 
le32_to_cpu(htt_buf->phy_warm_reset)); + len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_ucode_trig = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_ucode_trig)); + len += scnprintf(buf + len, buf_len - len, "mac_warm_reset_restore_cal = %u\n", + le32_to_cpu(htt_buf->mac_warm_reset_restore_cal)); + len += scnprintf(buf + len, buf_len - len, "mac_sfm_reset = %u\n", + le32_to_cpu(htt_buf->mac_sfm_reset)); + len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_m3_ssr = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_m3_ssr)); + len += scnprintf(buf + len, buf_len - len, "fw_rx_rings_reset = %u\n", + le32_to_cpu(htt_buf->fw_rx_rings_reset)); + len += scnprintf(buf + len, buf_len - len, "tx_flush = %u\n", + le32_to_cpu(htt_buf->tx_flush)); + len += scnprintf(buf + len, buf_len - len, "tx_glb_reset = %u\n", + le32_to_cpu(htt_buf->tx_glb_reset)); + len += scnprintf(buf + len, buf_len - len, "tx_txq_reset = %u\n", + le32_to_cpu(htt_buf->tx_txq_reset)); + len += scnprintf(buf + len, buf_len - len, "rx_timeout_reset = %u\n\n", + le32_to_cpu(htt_buf->rx_timeout_reset)); + + len += scnprintf(buf + len, buf_len - len, "PDEV_PHY_WARM_RESET_REASONS:\n"); + len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_reason_phy_m3 = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_phy_m3)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_tx_hw_stuck = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_hw_stuck)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_num_cca_rx_frame_stuck = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_num_rx_frame_stuck)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_wal_rx_recovery_rst_rx_busy = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_wal_rx_rec_rx_busy)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_wal_rx_recovery_rst_mac_hang = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_wal_rx_rec_mac_hng)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_mac_reset_converted_phy_reset = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_mac_conv_phy_reset)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_tx_lifetime_expiry_cca_stuck = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_exp_cca_stuck)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_tx_consecutive_flush9_war = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_consec_flsh_war)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_tx_hwsch_reset_war = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_hwsch_reset_war)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_hwsch_wdog_or_cca_wdog_war = %u\n\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_hwsch_cca_wdog_war)); + + len += scnprintf(buf + len, buf_len - len, "WAL_RX_RECOVERY_STATS:\n"); + len += scnprintf(buf + len, buf_len - len, + "wal_rx_recovery_rst_mac_hang_count = %u\n", + le32_to_cpu(htt_buf->wal_rx_recovery_rst_mac_hang_cnt)); + len += scnprintf(buf + len, buf_len - len, + "wal_rx_recovery_rst_known_sig_count = %u\n", + le32_to_cpu(htt_buf->wal_rx_recovery_rst_known_sig_cnt)); + len += scnprintf(buf + len, buf_len - len, + "wal_rx_recovery_rst_no_rx_count = %u\n", + le32_to_cpu(htt_buf->wal_rx_recovery_rst_no_rx_cnt)); + len += scnprintf(buf + len, buf_len - len, + "wal_rx_recovery_rst_no_rx_consecutive_count = %u\n", + le32_to_cpu(htt_buf->wal_rx_recovery_rst_no_rx_consec_cnt)); + len += scnprintf(buf + 
len, buf_len - len, + "wal_rx_recovery_rst_rx_busy_count = %u\n", + le32_to_cpu(htt_buf->wal_rx_recovery_rst_rx_busy_cnt)); + len += scnprintf(buf + len, buf_len - len, + "wal_rx_recovery_rst_phy_mac_hang_count = %u\n\n", + le32_to_cpu(htt_buf->wal_rx_recovery_rst_phy_mac_hang_cnt)); + + len += scnprintf(buf + len, buf_len - len, "HTT_RX_DEST_DRAIN_STATS:\n"); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_rx_descs_leak_prevention_done = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_rx_descs_leak_prevented)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_rx_descs_saved_cnt = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_rx_descs_saved_cnt)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_rxdma2reo_leak_detected = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_rxdma2reo_leak_detected)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_rxdma2fw_leak_detected = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_rxdma2fw_leak_detected)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_rxdma2wbm_leak_detected = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_rxdma2wbm_leak_detected)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_rxdma1_2sw_leak_detected = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_rxdma1_2sw_leak_detected)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_rx_drain_ok_mac_idle = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_rx_drain_ok_mac_idle)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_ok_mac_not_idle = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_ok_mac_not_idle)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_prerequisite_invld = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_prerequisite_invld)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_skip_for_non_lmac_reset = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_skip_non_lmac_reset)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_hw_fifo_not_empty_post_drain_wait = %u\n\n", + le32_to_cpu(htt_buf->rx_dest_drain_hw_fifo_notempty_post_wait)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_hw_stats_intr_misc_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_hw_stats_intr_misc_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "hw_intr_name = %s\n", + htt_stats_buf->hw_intr_name); + len += scnprintf(buf + len, buf_len - len, "mask = %u\n", + le32_to_cpu(htt_stats_buf->mask)); + len += scnprintf(buf + len, buf_len - len, "count = %u\n\n", + le32_to_cpu(htt_stats_buf->count)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_hw_stats_whal_tx_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_hw_stats_whal_tx_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WHAL_TX_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += 
scnprintf(buf + len, buf_len - len, "last_unpause_ppdu_id = %u\n", + le32_to_cpu(htt_stats_buf->last_unpause_ppdu_id)); + len += scnprintf(buf + len, buf_len - len, "hwsch_unpause_wait_tqm_write = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_unpause_wait_tqm_write)); + len += scnprintf(buf + len, buf_len - len, "hwsch_dummy_tlv_skipped = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_dummy_tlv_skipped)); + len += scnprintf(buf + len, buf_len - len, + "hwsch_misaligned_offset_received = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_misaligned_offset_received)); + len += scnprintf(buf + len, buf_len - len, "hwsch_reset_count = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_reset_count)); + len += scnprintf(buf + len, buf_len - len, "hwsch_dev_reset_war = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_dev_reset_war)); + len += scnprintf(buf + len, buf_len - len, "hwsch_delayed_pause = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_delayed_pause)); + len += scnprintf(buf + len, buf_len - len, "hwsch_long_delayed_pause = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_long_delayed_pause)); + len += scnprintf(buf + len, buf_len - len, "sch_rx_ppdu_no_response = %u\n", + le32_to_cpu(htt_stats_buf->sch_rx_ppdu_no_response)); + len += scnprintf(buf + len, buf_len - len, "sch_selfgen_response = %u\n", + le32_to_cpu(htt_stats_buf->sch_selfgen_response)); + len += scnprintf(buf + len, buf_len - len, "sch_rx_sifs_resp_trigger= %u\n\n", + le32_to_cpu(htt_stats_buf->sch_rx_sifs_resp_trigger)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_hw_war_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_hw_war_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 fixed_len, array_len; + u8 i, array_words; + u32 mac_id; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id = __le32_to_cpu(htt_stats_buf->mac_id__word); + fixed_len = sizeof(*htt_stats_buf); + array_len = tag_len - fixed_len; + array_words = array_len >> 2; + + len += scnprintf(buf + len, buf_len - len, "HTT_HW_WAR_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id, ATH12K_HTT_STATS_MAC_ID)); + + for (i = 0; i < array_words; i++) { + len += scnprintf(buf + len, buf_len - len, "hw_war %u = %u\n\n", + i, le32_to_cpu(htt_stats_buf->hw_wars[i])); + } + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_tqm_cmn_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_tqm_cmn_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMN_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "max_cmdq_id = %u\n", + le32_to_cpu(htt_stats_buf->max_cmdq_id)); + len += scnprintf(buf + len, buf_len - len, "list_mpdu_cnt_hist_intvl = %u\n", + le32_to_cpu(htt_stats_buf->list_mpdu_cnt_hist_intvl)); + len += scnprintf(buf + len, buf_len - len, "add_msdu = %u\n", + le32_to_cpu(htt_stats_buf->add_msdu)); + len += scnprintf(buf + len, buf_len - len, "q_empty = %u\n", + le32_to_cpu(htt_stats_buf->q_empty)); + len += 
scnprintf(buf + len, buf_len - len, "q_not_empty = %u\n", + le32_to_cpu(htt_stats_buf->q_not_empty)); + len += scnprintf(buf + len, buf_len - len, "drop_notification = %u\n", + le32_to_cpu(htt_stats_buf->drop_notification)); + len += scnprintf(buf + len, buf_len - len, "desc_threshold = %u\n", + le32_to_cpu(htt_stats_buf->desc_threshold)); + len += scnprintf(buf + len, buf_len - len, "hwsch_tqm_invalid_status = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_tqm_invalid_status)); + len += scnprintf(buf + len, buf_len - len, "missed_tqm_gen_mpdus = %u\n", + le32_to_cpu(htt_stats_buf->missed_tqm_gen_mpdus)); + len += scnprintf(buf + len, buf_len - len, + "total_msduq_timestamp_updates = %u\n", + le32_to_cpu(htt_stats_buf->msduq_timestamp_updates)); + len += scnprintf(buf + len, buf_len - len, + "total_msduq_timestamp_updates_by_get_mpdu_head_info_cmd = %u\n", + le32_to_cpu(htt_stats_buf->msduq_updates_mpdu_head_info_cmd)); + len += scnprintf(buf + len, buf_len - len, + "total_msduq_timestamp_updates_by_emp_to_nonemp_status = %u\n", + le32_to_cpu(htt_stats_buf->msduq_updates_emp_to_nonemp_status)); + len += scnprintf(buf + len, buf_len - len, + "total_get_mpdu_head_info_cmds_by_sched_algo_la_query = %u\n", + le32_to_cpu(htt_stats_buf->get_mpdu_head_info_cmds_by_query)); + len += scnprintf(buf + len, buf_len - len, + "total_get_mpdu_head_info_cmds_by_tac = %u\n", + le32_to_cpu(htt_stats_buf->get_mpdu_head_info_cmds_by_tac)); + len += scnprintf(buf + len, buf_len - len, + "total_gen_mpdu_cmds_by_sched_algo_la_query = %u\n", + le32_to_cpu(htt_stats_buf->gen_mpdu_cmds_by_query)); + len += scnprintf(buf + len, buf_len - len, "active_tqm_tids = %u\n", + le32_to_cpu(htt_stats_buf->tqm_active_tids)); + len += scnprintf(buf + len, buf_len - len, "inactive_tqm_tids = %u\n", + le32_to_cpu(htt_stats_buf->tqm_inactive_tids)); + len += scnprintf(buf + len, buf_len - len, "tqm_active_msduq_flows = %u\n", + le32_to_cpu(htt_stats_buf->tqm_active_msduq_flows)); + len += scnprintf(buf + len, buf_len - len, "hi_prio_q_not_empty = %u\n\n", + le32_to_cpu(htt_stats_buf->high_prio_q_not_empty)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_tqm_error_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_tqm_error_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_ERROR_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "q_empty_failure = %u\n", + le32_to_cpu(htt_stats_buf->q_empty_failure)); + len += scnprintf(buf + len, buf_len - len, "q_not_empty_failure = %u\n", + le32_to_cpu(htt_stats_buf->q_not_empty_failure)); + len += scnprintf(buf + len, buf_len - len, "add_msdu_failure = %u\n\n", + le32_to_cpu(htt_stats_buf->add_msdu_failure)); + + len += scnprintf(buf + len, buf_len - len, "TQM_ERROR_RESET_STATS:\n"); + len += scnprintf(buf + len, buf_len - len, "tqm_cache_ctl_err = %u\n", + le32_to_cpu(htt_stats_buf->tqm_cache_ctl_err)); + len += scnprintf(buf + len, buf_len - len, "tqm_soft_reset = %u\n", + le32_to_cpu(htt_stats_buf->tqm_soft_reset)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_total_num_in_use_link_descs = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_num_in_use_link_descs)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_worst_case_num_lost_link_descs = %u\n", + 
le32_to_cpu(htt_stats_buf->tqm_reset_num_lost_link_descs)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_worst_case_num_lost_host_tx_bufs_count = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_num_lost_host_tx_buf_cnt)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_num_in_use_link_descs_internal_tqm = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_num_in_use_internal_tqm)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_num_in_use_link_descs_wbm_idle_link_ring = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_num_in_use_idle_link_rng)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_time_to_tqm_hang_delta_ms = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_time_to_tqm_hang_delta_ms)); + len += scnprintf(buf + len, buf_len - len, "tqm_reset_recovery_time_ms = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_recovery_time_ms)); + len += scnprintf(buf + len, buf_len - len, "tqm_reset_num_peers_hdl = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_num_peers_hdl)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_cumm_dirty_hw_mpduq_proc_cnt = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_cumm_dirty_hw_mpduq_cnt)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_cumm_dirty_hw_msduq_proc = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_cumm_dirty_hw_msduq_proc)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_flush_cache_cmd_su_cnt = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_su_cnt)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_flush_cache_cmd_other_cnt = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_other_cnt)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_flush_cache_cmd_trig_type = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_trig_type)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_flush_cache_cmd_trig_cfg = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_trig_cfg)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_flush_cache_cmd_skip_cmd_status_null = %u\n\n", + le32_to_cpu(htt_stats_buf->tqm_reset_flush_cmd_skp_status_null)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_tqm_gen_mpdu_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_tqm_gen_mpdu_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elements = tag_len >> 2; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_GEN_MPDU_STATS_TLV:\n"); + len += print_array_to_buf(buf, len, "gen_mpdu_end_reason", + htt_stats_buf->gen_mpdu_end_reason, num_elements, + "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_tqm_list_mpdu_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_tqm_list_mpdu_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elems = min_t(u16, (tag_len >> 2), + ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_END_REASON); + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_STATS_TLV:\n"); + len += print_array_to_buf(buf, len, "list_mpdu_end_reason", + htt_stats_buf->list_mpdu_end_reason, num_elems, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_tqm_list_mpdu_cnt_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) 
+{ + const struct ath12k_htt_tx_tqm_list_mpdu_cnt_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elems = min_t(u16, (tag_len >> 2), + ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS); + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:\n"); + len += print_array_to_buf(buf, len, "list_mpdu_cnt_hist", + htt_stats_buf->list_mpdu_cnt_hist, num_elems, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_tqm_pdev_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_tqm_pdev_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_PDEV_STATS_TLV_V:\n"); + len += scnprintf(buf + len, buf_len - len, "msdu_count = %u\n", + le32_to_cpu(htt_stats_buf->msdu_count)); + len += scnprintf(buf + len, buf_len - len, "mpdu_count = %u\n", + le32_to_cpu(htt_stats_buf->mpdu_count)); + len += scnprintf(buf + len, buf_len - len, "remove_msdu = %u\n", + le32_to_cpu(htt_stats_buf->remove_msdu)); + len += scnprintf(buf + len, buf_len - len, "remove_mpdu = %u\n", + le32_to_cpu(htt_stats_buf->remove_mpdu)); + len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl = %u\n", + le32_to_cpu(htt_stats_buf->remove_msdu_ttl)); + len += scnprintf(buf + len, buf_len - len, "send_bar = %u\n", + le32_to_cpu(htt_stats_buf->send_bar)); + len += scnprintf(buf + len, buf_len - len, "bar_sync = %u\n", + le32_to_cpu(htt_stats_buf->bar_sync)); + len += scnprintf(buf + len, buf_len - len, "notify_mpdu = %u\n", + le32_to_cpu(htt_stats_buf->notify_mpdu)); + len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n", + le32_to_cpu(htt_stats_buf->sync_cmd)); + len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n", + le32_to_cpu(htt_stats_buf->write_cmd)); + len += scnprintf(buf + len, buf_len - len, "hwsch_trigger = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_trigger)); + len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n", + le32_to_cpu(htt_stats_buf->ack_tlv_proc)); + len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n", + le32_to_cpu(htt_stats_buf->gen_mpdu_cmd)); + len += scnprintf(buf + len, buf_len - len, "gen_list_cmd = %u\n", + le32_to_cpu(htt_stats_buf->gen_list_cmd)); + len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n", + le32_to_cpu(htt_stats_buf->remove_mpdu_cmd)); + len += scnprintf(buf + len, buf_len - len, "remove_mpdu_tried_cmd = %u\n", + le32_to_cpu(htt_stats_buf->remove_mpdu_tried_cmd)); + len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n", + le32_to_cpu(htt_stats_buf->mpdu_queue_stats_cmd)); + len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n", + le32_to_cpu(htt_stats_buf->mpdu_head_info_cmd)); + len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n", + le32_to_cpu(htt_stats_buf->msdu_flow_stats_cmd)); + len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n", + le32_to_cpu(htt_stats_buf->remove_msdu_cmd)); + len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl_cmd = %u\n", + le32_to_cpu(htt_stats_buf->remove_msdu_ttl_cmd)); + len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n", + le32_to_cpu(htt_stats_buf->flush_cache_cmd)); + len += scnprintf(buf + len, buf_len - len, 
"update_mpduq_cmd = %u\n", + le32_to_cpu(htt_stats_buf->update_mpduq_cmd)); + len += scnprintf(buf + len, buf_len - len, "enqueue = %u\n", + le32_to_cpu(htt_stats_buf->enqueue)); + len += scnprintf(buf + len, buf_len - len, "enqueue_notify = %u\n", + le32_to_cpu(htt_stats_buf->enqueue_notify)); + len += scnprintf(buf + len, buf_len - len, "notify_mpdu_at_head = %u\n", + le32_to_cpu(htt_stats_buf->notify_mpdu_at_head)); + len += scnprintf(buf + len, buf_len - len, "notify_mpdu_state_valid = %u\n", + le32_to_cpu(htt_stats_buf->notify_mpdu_state_valid)); + len += scnprintf(buf + len, buf_len - len, "sched_udp_notify1 = %u\n", + le32_to_cpu(htt_stats_buf->sched_udp_notify1)); + len += scnprintf(buf + len, buf_len - len, "sched_udp_notify2 = %u\n", + le32_to_cpu(htt_stats_buf->sched_udp_notify2)); + len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify1 = %u\n", + le32_to_cpu(htt_stats_buf->sched_nonudp_notify1)); + len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify2 = %u\n\n", + le32_to_cpu(htt_stats_buf->sched_nonudp_notify2)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_de_cmn_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_de_cmn_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CMN_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "tcl2fw_entry_count = %u\n", + le32_to_cpu(htt_stats_buf->tcl2fw_entry_count)); + len += scnprintf(buf + len, buf_len - len, "not_to_fw = %u\n", + le32_to_cpu(htt_stats_buf->not_to_fw)); + len += scnprintf(buf + len, buf_len - len, "invalid_pdev_vdev_peer = %u\n", + le32_to_cpu(htt_stats_buf->invalid_pdev_vdev_peer)); + len += scnprintf(buf + len, buf_len - len, "tcl_res_invalid_addrx = %u\n", + le32_to_cpu(htt_stats_buf->tcl_res_invalid_addrx)); + len += scnprintf(buf + len, buf_len - len, "wbm2fw_entry_count = %u\n", + le32_to_cpu(htt_stats_buf->wbm2fw_entry_count)); + len += scnprintf(buf + len, buf_len - len, "invalid_pdev = %u\n", + le32_to_cpu(htt_stats_buf->invalid_pdev)); + len += scnprintf(buf + len, buf_len - len, "tcl_res_addrx_timeout = %u\n", + le32_to_cpu(htt_stats_buf->tcl_res_addrx_timeout)); + len += scnprintf(buf + len, buf_len - len, "invalid_vdev = %u\n", + le32_to_cpu(htt_stats_buf->invalid_vdev)); + len += scnprintf(buf + len, buf_len - len, "invalid_tcl_exp_frame_desc = %u\n", + le32_to_cpu(htt_stats_buf->invalid_tcl_exp_frame_desc)); + len += scnprintf(buf + len, buf_len - len, "vdev_id_mismatch_count = %u\n\n", + le32_to_cpu(htt_stats_buf->vdev_id_mismatch_cnt)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_de_eapol_packets_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_de_eapol_packets_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_DE_EAPOL_PACKETS_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "m1_packets = %u\n", + 
le32_to_cpu(htt_stats_buf->m1_packets)); + len += scnprintf(buf + len, buf_len - len, "m2_packets = %u\n", + le32_to_cpu(htt_stats_buf->m2_packets)); + len += scnprintf(buf + len, buf_len - len, "m3_packets = %u\n", + le32_to_cpu(htt_stats_buf->m3_packets)); + len += scnprintf(buf + len, buf_len - len, "m4_packets = %u\n", + le32_to_cpu(htt_stats_buf->m4_packets)); + len += scnprintf(buf + len, buf_len - len, "g1_packets = %u\n", + le32_to_cpu(htt_stats_buf->g1_packets)); + len += scnprintf(buf + len, buf_len - len, "g2_packets = %u\n", + le32_to_cpu(htt_stats_buf->g2_packets)); + len += scnprintf(buf + len, buf_len - len, "rc4_packets = %u\n", + le32_to_cpu(htt_stats_buf->rc4_packets)); + len += scnprintf(buf + len, buf_len - len, "eap_packets = %u\n", + le32_to_cpu(htt_stats_buf->eap_packets)); + len += scnprintf(buf + len, buf_len - len, "eapol_start_packets = %u\n", + le32_to_cpu(htt_stats_buf->eapol_start_packets)); + len += scnprintf(buf + len, buf_len - len, "eapol_logoff_packets = %u\n", + le32_to_cpu(htt_stats_buf->eapol_logoff_packets)); + len += scnprintf(buf + len, buf_len - len, "eapol_encap_asf_packets = %u\n\n", + le32_to_cpu(htt_stats_buf->eapol_encap_asf_packets)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_de_classify_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_de_classify_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CLASSIFY_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "arp_packets = %u\n", + le32_to_cpu(htt_stats_buf->arp_packets)); + len += scnprintf(buf + len, buf_len - len, "igmp_packets = %u\n", + le32_to_cpu(htt_stats_buf->igmp_packets)); + len += scnprintf(buf + len, buf_len - len, "dhcp_packets = %u\n", + le32_to_cpu(htt_stats_buf->dhcp_packets)); + len += scnprintf(buf + len, buf_len - len, "host_inspected = %u\n", + le32_to_cpu(htt_stats_buf->host_inspected)); + len += scnprintf(buf + len, buf_len - len, "htt_included = %u\n", + le32_to_cpu(htt_stats_buf->htt_included)); + len += scnprintf(buf + len, buf_len - len, "htt_valid_mcs = %u\n", + le32_to_cpu(htt_stats_buf->htt_valid_mcs)); + len += scnprintf(buf + len, buf_len - len, "htt_valid_nss = %u\n", + le32_to_cpu(htt_stats_buf->htt_valid_nss)); + len += scnprintf(buf + len, buf_len - len, "htt_valid_preamble_type = %u\n", + le32_to_cpu(htt_stats_buf->htt_valid_preamble_type)); + len += scnprintf(buf + len, buf_len - len, "htt_valid_chainmask = %u\n", + le32_to_cpu(htt_stats_buf->htt_valid_chainmask)); + len += scnprintf(buf + len, buf_len - len, "htt_valid_guard_interval = %u\n", + le32_to_cpu(htt_stats_buf->htt_valid_guard_interval)); + len += scnprintf(buf + len, buf_len - len, "htt_valid_retries = %u\n", + le32_to_cpu(htt_stats_buf->htt_valid_retries)); + len += scnprintf(buf + len, buf_len - len, "htt_valid_bw_info = %u\n", + le32_to_cpu(htt_stats_buf->htt_valid_bw_info)); + len += scnprintf(buf + len, buf_len - len, "htt_valid_power = %u\n", + le32_to_cpu(htt_stats_buf->htt_valid_power)); + len += scnprintf(buf + len, buf_len - len, "htt_valid_key_flags = 0x%x\n", + le32_to_cpu(htt_stats_buf->htt_valid_key_flags)); + len += scnprintf(buf + len, buf_len - len, "htt_valid_no_encryption = %u\n", + le32_to_cpu(htt_stats_buf->htt_valid_no_encryption)); + len += scnprintf(buf + len, buf_len - len, 
"fse_entry_count = %u\n", + le32_to_cpu(htt_stats_buf->fse_entry_count)); + len += scnprintf(buf + len, buf_len - len, "fse_priority_be = %u\n", + le32_to_cpu(htt_stats_buf->fse_priority_be)); + len += scnprintf(buf + len, buf_len - len, "fse_priority_high = %u\n", + le32_to_cpu(htt_stats_buf->fse_priority_high)); + len += scnprintf(buf + len, buf_len - len, "fse_priority_low = %u\n", + le32_to_cpu(htt_stats_buf->fse_priority_low)); + len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_be = %u\n", + le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_be)); + len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_over_sub = %u\n", + le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_over_sub)); + len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_bursty = %u\n", + le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_bursty)); + len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_interactive = %u\n", + le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_interactive)); + len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_periodic = %u\n", + le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_periodic)); + len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_alloc = %u\n", + le32_to_cpu(htt_stats_buf->fse_hwqueue_alloc)); + len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_created = %u\n", + le32_to_cpu(htt_stats_buf->fse_hwqueue_created)); + len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_send_to_host = %u\n", + le32_to_cpu(htt_stats_buf->fse_hwqueue_send_to_host)); + len += scnprintf(buf + len, buf_len - len, "mcast_entry = %u\n", + le32_to_cpu(htt_stats_buf->mcast_entry)); + len += scnprintf(buf + len, buf_len - len, "bcast_entry = %u\n", + le32_to_cpu(htt_stats_buf->bcast_entry)); + len += scnprintf(buf + len, buf_len - len, "htt_update_peer_cache = %u\n", + le32_to_cpu(htt_stats_buf->htt_update_peer_cache)); + len += scnprintf(buf + len, buf_len - len, "htt_learning_frame = %u\n", + le32_to_cpu(htt_stats_buf->htt_learning_frame)); + len += scnprintf(buf + len, buf_len - len, "fse_invalid_peer = %u\n", + le32_to_cpu(htt_stats_buf->fse_invalid_peer)); + len += scnprintf(buf + len, buf_len - len, "mec_notify = %u\n\n", + le32_to_cpu(htt_stats_buf->mec_notify)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_de_classify_failed_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_de_classify_failed_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_DE_CLASSIFY_FAILED_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "ap_bss_peer_not_found = %u\n", + le32_to_cpu(htt_stats_buf->ap_bss_peer_not_found)); + len += scnprintf(buf + len, buf_len - len, "ap_bcast_mcast_no_peer = %u\n", + le32_to_cpu(htt_stats_buf->ap_bcast_mcast_no_peer)); + len += scnprintf(buf + len, buf_len - len, "sta_delete_in_progress = %u\n", + le32_to_cpu(htt_stats_buf->sta_delete_in_progress)); + len += scnprintf(buf + len, buf_len - len, "ibss_no_bss_peer = %u\n", + le32_to_cpu(htt_stats_buf->ibss_no_bss_peer)); + len += scnprintf(buf + len, buf_len - len, "invalid_vdev_type = %u\n", + le32_to_cpu(htt_stats_buf->invalid_vdev_type)); + len += scnprintf(buf + len, buf_len - len, "invalid_ast_peer_entry = %u\n", + le32_to_cpu(htt_stats_buf->invalid_ast_peer_entry)); + len += scnprintf(buf + len, buf_len - len, 
"peer_entry_invalid = %u\n", + le32_to_cpu(htt_stats_buf->peer_entry_invalid)); + len += scnprintf(buf + len, buf_len - len, "ethertype_not_ip = %u\n", + le32_to_cpu(htt_stats_buf->ethertype_not_ip)); + len += scnprintf(buf + len, buf_len - len, "eapol_lookup_failed = %u\n", + le32_to_cpu(htt_stats_buf->eapol_lookup_failed)); + len += scnprintf(buf + len, buf_len - len, "qpeer_not_allow_data = %u\n", + le32_to_cpu(htt_stats_buf->qpeer_not_allow_data)); + len += scnprintf(buf + len, buf_len - len, "fse_tid_override = %u\n", + le32_to_cpu(htt_stats_buf->fse_tid_override)); + len += scnprintf(buf + len, buf_len - len, "ipv6_jumbogram_zero_length = %u\n", + le32_to_cpu(htt_stats_buf->ipv6_jumbogram_zero_length)); + len += scnprintf(buf + len, buf_len - len, "qos_to_non_qos_in_prog = %u\n", + le32_to_cpu(htt_stats_buf->qos_to_non_qos_in_prog)); + len += scnprintf(buf + len, buf_len - len, "ap_bcast_mcast_eapol = %u\n", + le32_to_cpu(htt_stats_buf->ap_bcast_mcast_eapol)); + len += scnprintf(buf + len, buf_len - len, "unicast_on_ap_bss_peer = %u\n", + le32_to_cpu(htt_stats_buf->unicast_on_ap_bss_peer)); + len += scnprintf(buf + len, buf_len - len, "ap_vdev_invalid = %u\n", + le32_to_cpu(htt_stats_buf->ap_vdev_invalid)); + len += scnprintf(buf + len, buf_len - len, "incomplete_llc = %u\n", + le32_to_cpu(htt_stats_buf->incomplete_llc)); + len += scnprintf(buf + len, buf_len - len, "eapol_duplicate_m3 = %u\n", + le32_to_cpu(htt_stats_buf->eapol_duplicate_m3)); + len += scnprintf(buf + len, buf_len - len, "eapol_duplicate_m4 = %u\n\n", + le32_to_cpu(htt_stats_buf->eapol_duplicate_m4)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_de_classify_status_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_de_classify_status_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_DE_CLASSIFY_STATUS_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "eok = %u\n", + le32_to_cpu(htt_stats_buf->eok)); + len += scnprintf(buf + len, buf_len - len, "classify_done = %u\n", + le32_to_cpu(htt_stats_buf->classify_done)); + len += scnprintf(buf + len, buf_len - len, "lookup_failed = %u\n", + le32_to_cpu(htt_stats_buf->lookup_failed)); + len += scnprintf(buf + len, buf_len - len, "send_host_dhcp = %u\n", + le32_to_cpu(htt_stats_buf->send_host_dhcp)); + len += scnprintf(buf + len, buf_len - len, "send_host_mcast = %u\n", + le32_to_cpu(htt_stats_buf->send_host_mcast)); + len += scnprintf(buf + len, buf_len - len, "send_host_unknown_dest = %u\n", + le32_to_cpu(htt_stats_buf->send_host_unknown_dest)); + len += scnprintf(buf + len, buf_len - len, "send_host = %u\n", + le32_to_cpu(htt_stats_buf->send_host)); + len += scnprintf(buf + len, buf_len - len, "status_invalid = %u\n\n", + le32_to_cpu(htt_stats_buf->status_invalid)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_de_enqueue_packets_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_de_enqueue_packets_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_DE_ENQUEUE_PACKETS_STATS_TLV:\n"); + len += 
scnprintf(buf + len, buf_len - len, "enqueued_pkts = %u\n", + le32_to_cpu(htt_stats_buf->enqueued_pkts)); + len += scnprintf(buf + len, buf_len - len, "to_tqm = %u\n", + le32_to_cpu(htt_stats_buf->to_tqm)); + len += scnprintf(buf + len, buf_len - len, "to_tqm_bypass = %u\n\n", + le32_to_cpu(htt_stats_buf->to_tqm_bypass)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_de_enqueue_discard_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_de_enqueue_discard_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_DE_ENQUEUE_DISCARD_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "discarded_pkts = %u\n", + le32_to_cpu(htt_stats_buf->discarded_pkts)); + len += scnprintf(buf + len, buf_len - len, "local_frames = %u\n", + le32_to_cpu(htt_stats_buf->local_frames)); + len += scnprintf(buf + len, buf_len - len, "is_ext_msdu = %u\n\n", + le32_to_cpu(htt_stats_buf->is_ext_msdu)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_de_compl_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_de_compl_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_COMPL_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "tcl_dummy_frame = %u\n", + le32_to_cpu(htt_stats_buf->tcl_dummy_frame)); + len += scnprintf(buf + len, buf_len - len, "tqm_dummy_frame = %u\n", + le32_to_cpu(htt_stats_buf->tqm_dummy_frame)); + len += scnprintf(buf + len, buf_len - len, "tqm_notify_frame = %u\n", + le32_to_cpu(htt_stats_buf->tqm_notify_frame)); + len += scnprintf(buf + len, buf_len - len, "fw2wbm_enq = %u\n", + le32_to_cpu(htt_stats_buf->fw2wbm_enq)); + len += scnprintf(buf + len, buf_len - len, "tqm_bypass_frame = %u\n\n", + le32_to_cpu(htt_stats_buf->tqm_bypass_frame)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_selfgen_cmn_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_selfgen_cmn_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_CMN_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "su_bar = %u\n", + le32_to_cpu(htt_stats_buf->su_bar)); + len += scnprintf(buf + len, buf_len - len, "rts = %u\n", + le32_to_cpu(htt_stats_buf->rts)); + len += scnprintf(buf + len, buf_len - len, "cts2self = %u\n", + le32_to_cpu(htt_stats_buf->cts2self)); + len += scnprintf(buf + len, buf_len - len, "qos_null = %u\n", + le32_to_cpu(htt_stats_buf->qos_null)); + len += scnprintf(buf + len, buf_len - len, "delayed_bar_1 = %u\n", + le32_to_cpu(htt_stats_buf->delayed_bar_1)); + len += scnprintf(buf + len, buf_len - len, "delayed_bar_2 = %u\n", + 
le32_to_cpu(htt_stats_buf->delayed_bar_2)); + len += scnprintf(buf + len, buf_len - len, "delayed_bar_3 = %u\n", + le32_to_cpu(htt_stats_buf->delayed_bar_3)); + len += scnprintf(buf + len, buf_len - len, "delayed_bar_4 = %u\n", + le32_to_cpu(htt_stats_buf->delayed_bar_4)); + len += scnprintf(buf + len, buf_len - len, "delayed_bar_5 = %u\n", + le32_to_cpu(htt_stats_buf->delayed_bar_5)); + len += scnprintf(buf + len, buf_len - len, "delayed_bar_6 = %u\n", + le32_to_cpu(htt_stats_buf->delayed_bar_6)); + len += scnprintf(buf + len, buf_len - len, "delayed_bar_7 = %u\n\n", + le32_to_cpu(htt_stats_buf->delayed_bar_7)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_selfgen_ac_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_selfgen_ac_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa_tried = %u\n", + le32_to_cpu(htt_stats_buf->ac_su_ndpa)); + len += scnprintf(buf + len, buf_len - len, "ac_su_ndp_tried = %u\n", + le32_to_cpu(htt_stats_buf->ac_su_ndp)); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa_tried = %u\n", + le32_to_cpu(htt_stats_buf->ac_mu_mimo_ndpa)); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp_tried = %u\n", + le32_to_cpu(htt_stats_buf->ac_mu_mimo_ndp)); + len += print_array_to_buf_index(buf, len, "ac_mu_mimo_brpollX_tried = ", 1, + htt_stats_buf->ac_mu_mimo_brpoll, + ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS - 1, + "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_selfgen_ax_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa_tried = %u\n", + le32_to_cpu(htt_stats_buf->ax_su_ndpa)); + len += scnprintf(buf + len, buf_len - len, "ax_su_ndp_tried = %u\n", + le32_to_cpu(htt_stats_buf->ax_su_ndp)); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa_tried = %u\n", + le32_to_cpu(htt_stats_buf->ax_mu_mimo_ndpa)); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp_tried = %u\n", + le32_to_cpu(htt_stats_buf->ax_mu_mimo_ndp)); + len += print_array_to_buf_index(buf, len, "ax_mu_mimo_brpollX_tried = ", 1, + htt_stats_buf->ax_mu_mimo_brpoll, + ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS - 1, "\n"); + len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger = %u\n", + le32_to_cpu(htt_stats_buf->ax_basic_trigger)); + len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_total_trigger = %u\n", + le32_to_cpu(htt_stats_buf->ax_ulmumimo_trigger)); + len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger = %u\n", + le32_to_cpu(htt_stats_buf->ax_bsr_trigger)); + len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger = %u\n", + le32_to_cpu(htt_stats_buf->ax_mu_bar_trigger)); + len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger = %u\n\n", + le32_to_cpu(htt_stats_buf->ax_mu_rts_trigger)); + + stats_req->buf_len = len; +} + +static void 
+ath12k_htt_print_tx_selfgen_be_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_selfgen_be_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_BE_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "be_su_ndpa_queued = %u\n", + le32_to_cpu(htt_stats_buf->be_su_ndpa_queued)); + len += scnprintf(buf + len, buf_len - len, "be_su_ndpa_tried = %u\n", + le32_to_cpu(htt_stats_buf->be_su_ndpa)); + len += scnprintf(buf + len, buf_len - len, "be_su_ndp_queued = %u\n", + le32_to_cpu(htt_stats_buf->be_su_ndp_queued)); + len += scnprintf(buf + len, buf_len - len, "be_su_ndp_tried = %u\n", + le32_to_cpu(htt_stats_buf->be_su_ndp)); + len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndpa_queued = %u\n", + le32_to_cpu(htt_stats_buf->be_mu_mimo_ndpa_queued)); + len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndpa_tried = %u\n", + le32_to_cpu(htt_stats_buf->be_mu_mimo_ndpa)); + len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndp_queued = %u\n", + le32_to_cpu(htt_stats_buf->be_mu_mimo_ndp_queued)); + len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndp_tried = %u\n", + le32_to_cpu(htt_stats_buf->be_mu_mimo_ndp)); + len += print_array_to_buf_index(buf, len, "be_mu_mimo_brpollX_queued = ", 1, + htt_stats_buf->be_mu_mimo_brpoll_queued, + ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1, + "\n"); + len += print_array_to_buf_index(buf, len, "be_mu_mimo_brpollX_tried = ", 1, + htt_stats_buf->be_mu_mimo_brpoll, + ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1, + "\n"); + len += print_array_to_buf(buf, len, "be_ul_mumimo_trigger = ", + htt_stats_buf->be_ul_mumimo_trigger, + ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS, "\n"); + len += scnprintf(buf + len, buf_len - len, "be_basic_trigger = %u\n", + le32_to_cpu(htt_stats_buf->be_basic_trigger)); + len += scnprintf(buf + len, buf_len - len, "be_ulmumimo_total_trigger = %u\n", + le32_to_cpu(htt_stats_buf->be_ulmumimo_trigger)); + len += scnprintf(buf + len, buf_len - len, "be_bsr_trigger = %u\n", + le32_to_cpu(htt_stats_buf->be_bsr_trigger)); + len += scnprintf(buf + len, buf_len - len, "be_mu_bar_trigger = %u\n", + le32_to_cpu(htt_stats_buf->be_mu_bar_trigger)); + len += scnprintf(buf + len, buf_len - len, "be_mu_rts_trigger = %u\n\n", + le32_to_cpu(htt_stats_buf->be_mu_rts_trigger)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_selfgen_ac_err_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_selfgen_ac_err_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_ERR_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "ac_su_ndp_err = %u\n", + le32_to_cpu(htt_stats_buf->ac_su_ndp_err)); + len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa_err = %u\n", + le32_to_cpu(htt_stats_buf->ac_su_ndpa_err)); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa_err = %u\n", + le32_to_cpu(htt_stats_buf->ac_mu_mimo_ndpa_err)); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp_err = %u\n", + le32_to_cpu(htt_stats_buf->ac_mu_mimo_ndp_err)); + len += scnprintf(buf + len, buf_len - 
len, "ac_mu_mimo_brp1_err = %u\n", + le32_to_cpu(htt_stats_buf->ac_mu_mimo_brp1_err)); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp2_err = %u\n", + le32_to_cpu(htt_stats_buf->ac_mu_mimo_brp2_err)); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp3_err = %u\n\n", + le32_to_cpu(htt_stats_buf->ac_mu_mimo_brp3_err)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_selfgen_ax_err_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_selfgen_ax_err_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_ERR_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "ax_su_ndp_err = %u\n", + le32_to_cpu(htt_stats_buf->ax_su_ndp_err)); + len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa_err = %u\n", + le32_to_cpu(htt_stats_buf->ax_su_ndpa_err)); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa_err = %u\n", + le32_to_cpu(htt_stats_buf->ax_mu_mimo_ndpa_err)); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp_err = %u\n", + le32_to_cpu(htt_stats_buf->ax_mu_mimo_ndp_err)); + len += print_array_to_buf_index(buf, len, "ax_mu_mimo_brpX_err", 1, + htt_stats_buf->ax_mu_mimo_brp_err, + ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS - 1, + "\n"); + len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger_err = %u\n", + le32_to_cpu(htt_stats_buf->ax_basic_trigger_err)); + len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_total_trigger_err = %u\n", + le32_to_cpu(htt_stats_buf->ax_ulmumimo_trigger_err)); + len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger_err = %u\n", + le32_to_cpu(htt_stats_buf->ax_bsr_trigger_err)); + len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger_err = %u\n", + le32_to_cpu(htt_stats_buf->ax_mu_bar_trigger_err)); + len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger_err = %u\n\n", + le32_to_cpu(htt_stats_buf->ax_mu_rts_trigger_err)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_selfgen_be_err_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_selfgen_be_err_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_BE_ERR_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "be_su_ndp_err = %u\n", + le32_to_cpu(htt_stats_buf->be_su_ndp_err)); + len += scnprintf(buf + len, buf_len - len, "be_su_ndp_flushed = %u\n", + le32_to_cpu(htt_stats_buf->be_su_ndp_flushed)); + len += scnprintf(buf + len, buf_len - len, "be_su_ndpa_err = %u\n", + le32_to_cpu(htt_stats_buf->be_su_ndpa_err)); + len += scnprintf(buf + len, buf_len - len, "be_su_ndpa_flushed = %u\n", + le32_to_cpu(htt_stats_buf->be_su_ndpa_flushed)); + len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndpa_err = %u\n", + le32_to_cpu(htt_stats_buf->be_mu_mimo_ndpa_err)); + len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndpa_flushed = %u\n", + le32_to_cpu(htt_stats_buf->be_mu_mimo_ndpa_flushed)); + len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndp_err = %u\n", + le32_to_cpu(htt_stats_buf->be_mu_mimo_ndp_err)); + len += scnprintf(buf + len, buf_len - len, 
"be_mu_mimo_ndp_flushed = %u\n", + le32_to_cpu(htt_stats_buf->be_mu_mimo_ndp_flushed)); + len += print_array_to_buf_index(buf, len, "be_mu_mimo_brpX_err", 1, + htt_stats_buf->be_mu_mimo_brp_err, + ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1, + "\n"); + len += print_array_to_buf_index(buf, len, "be_mu_mimo_brpollX_flushed", 1, + htt_stats_buf->be_mu_mimo_brpoll_flushed, + ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1, + "\n"); + len += print_array_to_buf(buf, len, "be_mu_mimo_num_cbf_rcvd_on_brp_err", + htt_stats_buf->be_mu_mimo_brp_err_num_cbf_rxd, + ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS, "\n"); + len += print_array_to_buf(buf, len, "be_ul_mumimo_trigger_err", + htt_stats_buf->be_ul_mumimo_trigger_err, + ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS, "\n"); + len += scnprintf(buf + len, buf_len - len, "be_basic_trigger_err = %u\n", + le32_to_cpu(htt_stats_buf->be_basic_trigger_err)); + len += scnprintf(buf + len, buf_len - len, "be_ulmumimo_total_trig_err = %u\n", + le32_to_cpu(htt_stats_buf->be_ulmumimo_trigger_err)); + len += scnprintf(buf + len, buf_len - len, "be_bsr_trigger_err = %u\n", + le32_to_cpu(htt_stats_buf->be_bsr_trigger_err)); + len += scnprintf(buf + len, buf_len - len, "be_mu_bar_trigger_err = %u\n", + le32_to_cpu(htt_stats_buf->be_mu_bar_trigger_err)); + len += scnprintf(buf + len, buf_len - len, "be_mu_rts_trigger_err = %u\n\n", + le32_to_cpu(htt_stats_buf->be_mu_rts_trigger_err)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_selfgen_ac_sched_status_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats) +{ + const struct ath12k_htt_tx_selfgen_ac_sched_status_stats_tlv *htt_stats_buf = + tag_buf; + u8 *buf = stats->buf; + u32 len = stats->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_SELFGEN_AC_SCHED_STATUS_STATS_TLV:\n"); + len += print_array_to_buf(buf, len, "ac_su_ndpa_sch_status", + htt_stats_buf->ac_su_ndpa_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ac_su_ndp_sch_status", + htt_stats_buf->ac_su_ndp_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ac_mu_mimo_ndpa_sch_status", + htt_stats_buf->ac_mu_mimo_ndpa_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ac_mu_mimo_ndp_sch_status", + htt_stats_buf->ac_mu_mimo_ndp_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ac_mu_mimo_brp_sch_status", + htt_stats_buf->ac_mu_mimo_brp_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ac_su_ndp_sch_flag_err", + htt_stats_buf->ac_su_ndp_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n"); + len += print_array_to_buf(buf, len, "ac_mu_mimo_ndp_sch_flag_err", + htt_stats_buf->ac_mu_mimo_ndp_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n"); + len += print_array_to_buf(buf, len, "ac_mu_mimo_brp_sch_flag_err", + htt_stats_buf->ac_mu_mimo_brp_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n\n"); + + stats->buf_len = len; +} + +static void +ath12k_htt_print_tx_selfgen_ax_sched_status_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats) +{ + const struct ath12k_htt_tx_selfgen_ax_sched_status_stats_tlv *htt_stats_buf = + tag_buf; + u8 *buf = stats->buf; + u32 len = stats->buf_len; + u32 
buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_SELFGEN_AX_SCHED_STATUS_STATS_TLV:\n"); + len += print_array_to_buf(buf, len, "ax_su_ndpa_sch_status", + htt_stats_buf->ax_su_ndpa_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ax_su_ndp_sch_status", + htt_stats_buf->ax_su_ndp_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ax_mu_mimo_ndpa_sch_status", + htt_stats_buf->ax_mu_mimo_ndpa_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ax_mu_mimo_ndp_sch_status", + htt_stats_buf->ax_mu_mimo_ndp_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ax_mu_brp_sch_status", + htt_stats_buf->ax_mu_brp_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ax_mu_bar_sch_status", + htt_stats_buf->ax_mu_bar_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ax_basic_trig_sch_status", + htt_stats_buf->ax_basic_trig_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ax_su_ndp_sch_flag_err", + htt_stats_buf->ax_su_ndp_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n"); + len += print_array_to_buf(buf, len, "ax_mu_mimo_ndp_sch_flag_err", + htt_stats_buf->ax_mu_mimo_ndp_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n"); + len += print_array_to_buf(buf, len, "ax_mu_brp_sch_flag_err", + htt_stats_buf->ax_mu_brp_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n"); + len += print_array_to_buf(buf, len, "ax_mu_bar_sch_flag_err", + htt_stats_buf->ax_mu_bar_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n"); + len += print_array_to_buf(buf, len, "ax_basic_trig_sch_flag_err", + htt_stats_buf->ax_basic_trig_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n"); + len += print_array_to_buf(buf, len, "ax_ulmumimo_trig_sch_status", + htt_stats_buf->ax_ulmumimo_trig_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ax_ulmumimo_trig_sch_flag_err", + htt_stats_buf->ax_ulmumimo_trig_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n\n"); + + stats->buf_len = len; +} + +static void +ath12k_htt_print_tx_selfgen_be_sched_status_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats) +{ + const struct ath12k_htt_tx_selfgen_be_sched_status_stats_tlv *htt_stats_buf = + tag_buf; + u8 *buf = stats->buf; + u32 len = stats->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_SELFGEN_BE_SCHED_STATUS_STATS_TLV:\n"); + len += print_array_to_buf(buf, len, "be_su_ndpa_sch_status", + htt_stats_buf->be_su_ndpa_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "be_su_ndp_sch_status", + htt_stats_buf->be_su_ndp_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "be_mu_mimo_ndpa_sch_status", + htt_stats_buf->be_mu_mimo_ndpa_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "be_mu_mimo_ndp_sch_status", + htt_stats_buf->be_mu_mimo_ndp_sch_status, + 
ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "be_mu_brp_sch_status", + htt_stats_buf->be_mu_brp_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "be_mu_bar_sch_status", + htt_stats_buf->be_mu_bar_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "be_basic_trig_sch_status", + htt_stats_buf->be_basic_trig_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "be_su_ndp_sch_flag_err", + htt_stats_buf->be_su_ndp_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n"); + len += print_array_to_buf(buf, len, "be_mu_mimo_ndp_sch_flag_err", + htt_stats_buf->be_mu_mimo_ndp_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n"); + len += print_array_to_buf(buf, len, "be_mu_brp_sch_flag_err", + htt_stats_buf->be_mu_brp_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n"); + len += print_array_to_buf(buf, len, "be_mu_bar_sch_flag_err", + htt_stats_buf->be_mu_bar_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n"); + len += print_array_to_buf(buf, len, "be_basic_trig_sch_flag_err", + htt_stats_buf->be_basic_trig_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n"); + len += print_array_to_buf(buf, len, "be_ulmumimo_trig_sch_status", + htt_stats_buf->be_ulmumimo_trig_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "be_ulmumimo_trig_sch_flag_err", + htt_stats_buf->be_ulmumimo_trig_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n\n"); + + stats->buf_len = len; +} + +static void +ath12k_htt_print_stats_string_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_stats_string_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u8 i; + u16 index = 0; + u32 datum; + char data[ATH12K_HTT_MAX_STRING_LEN] = {0}; + + tag_len = tag_len >> 2; + + len += scnprintf(buf + len, buf_len - len, "HTT_STATS_STRING_TLV:\n"); + for (i = 0; i < tag_len; i++) { + datum = __le32_to_cpu(htt_stats_buf->data[i]); + index += scnprintf(&data[index], ATH12K_HTT_MAX_STRING_LEN - index, + "%.*s", 4, (char *)&datum); + if (index >= ATH12K_HTT_MAX_STRING_LEN) + break; + } + len += scnprintf(buf + len, buf_len - len, "data = %s\n\n", data); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_sring_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_sring_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + u32 avail_words; + u32 head_tail_ptr; + u32 sring_stat; + u32 tail_ptr; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__ring_id__arena__ep); + avail_words = __le32_to_cpu(htt_stats_buf->num_avail_words__num_valid_words); + head_tail_ptr = __le32_to_cpu(htt_stats_buf->head_ptr__tail_ptr); + sring_stat = __le32_to_cpu(htt_stats_buf->consumer_empty__producer_full); + tail_ptr = __le32_to_cpu(htt_stats_buf->prefetch_count__internal_tail_ptr); + + len += scnprintf(buf + len, buf_len - len, "HTT_SRING_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_SRING_STATS_MAC_ID)); + len += 
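+ /* ring_id/arena/ep below are presumably sub-fields of the same packed LE32 word (mac_id__ring_id__arena__ep) already printed for mac_id; the ATH12K_HTT_SRING_STATS_* masks fed to u32_get_bits() select each sub-field. */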
scnprintf(buf + len, buf_len - len, "ring_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_SRING_STATS_RING_ID)); + len += scnprintf(buf + len, buf_len - len, "arena = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_SRING_STATS_ARENA)); + len += scnprintf(buf + len, buf_len - len, "ep = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_SRING_STATS_EP)); + len += scnprintf(buf + len, buf_len - len, "base_addr_lsb = 0x%x\n", + le32_to_cpu(htt_stats_buf->base_addr_lsb)); + len += scnprintf(buf + len, buf_len - len, "base_addr_msb = 0x%x\n", + le32_to_cpu(htt_stats_buf->base_addr_msb)); + len += scnprintf(buf + len, buf_len - len, "ring_size = %u\n", + le32_to_cpu(htt_stats_buf->ring_size)); + len += scnprintf(buf + len, buf_len - len, "elem_size = %u\n", + le32_to_cpu(htt_stats_buf->elem_size)); + len += scnprintf(buf + len, buf_len - len, "num_avail_words = %u\n", + u32_get_bits(avail_words, + ATH12K_HTT_SRING_STATS_NUM_AVAIL_WORDS)); + len += scnprintf(buf + len, buf_len - len, "num_valid_words = %u\n", + u32_get_bits(avail_words, + ATH12K_HTT_SRING_STATS_NUM_VALID_WORDS)); + len += scnprintf(buf + len, buf_len - len, "head_ptr = %u\n", + u32_get_bits(head_tail_ptr, ATH12K_HTT_SRING_STATS_HEAD_PTR)); + len += scnprintf(buf + len, buf_len - len, "tail_ptr = %u\n", + u32_get_bits(head_tail_ptr, ATH12K_HTT_SRING_STATS_TAIL_PTR)); + len += scnprintf(buf + len, buf_len - len, "consumer_empty = %u\n", + u32_get_bits(sring_stat, + ATH12K_HTT_SRING_STATS_CONSUMER_EMPTY)); + len += scnprintf(buf + len, buf_len - len, "producer_full = %u\n", + u32_get_bits(head_tail_ptr, + ATH12K_HTT_SRING_STATS_PRODUCER_FULL)); + len += scnprintf(buf + len, buf_len - len, "prefetch_count = %u\n", + u32_get_bits(tail_ptr, ATH12K_HTT_SRING_STATS_PREFETCH_COUNT)); + len += scnprintf(buf + len, buf_len - len, "internal_tail_ptr = %u\n\n", + u32_get_bits(tail_ptr, + ATH12K_HTT_SRING_STATS_INTERNAL_TAIL_PTR)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_sfm_cmn_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_sfm_cmn_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CMN_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "buf_total = %u\n", + le32_to_cpu(htt_stats_buf->buf_total)); + len += scnprintf(buf + len, buf_len - len, "mem_empty = %u\n", + le32_to_cpu(htt_stats_buf->mem_empty)); + len += scnprintf(buf + len, buf_len - len, "deallocate_bufs = %u\n", + le32_to_cpu(htt_stats_buf->deallocate_bufs)); + len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n", + le32_to_cpu(htt_stats_buf->num_records)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_sfm_client_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_sfm_client_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "client_id = %u\n", + 
le32_to_cpu(htt_stats_buf->client_id)); + len += scnprintf(buf + len, buf_len - len, "buf_min = %u\n", + le32_to_cpu(htt_stats_buf->buf_min)); + len += scnprintf(buf + len, buf_len - len, "buf_max = %u\n", + le32_to_cpu(htt_stats_buf->buf_max)); + len += scnprintf(buf + len, buf_len - len, "buf_busy = %u\n", + le32_to_cpu(htt_stats_buf->buf_busy)); + len += scnprintf(buf + len, buf_len - len, "buf_alloc = %u\n", + le32_to_cpu(htt_stats_buf->buf_alloc)); + len += scnprintf(buf + len, buf_len - len, "buf_avail = %u\n", + le32_to_cpu(htt_stats_buf->buf_avail)); + len += scnprintf(buf + len, buf_len - len, "num_users = %u\n\n", + le32_to_cpu(htt_stats_buf->num_users)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_sfm_client_user_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_sfm_client_user_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elems = tag_len >> 2; + + len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_USER_TLV:\n"); + len += print_array_to_buf(buf, len, "dwords_used_by_user_n", + htt_stats_buf->dwords_used_by_user_n, + num_elems, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_pdev_mu_mimo_sch_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_mu_mimo_sch_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u8 i; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_MU_MIMO_SCH_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_posted = %u\n", + le32_to_cpu(htt_stats_buf->mu_mimo_sch_posted)); + len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_failed = %u\n", + le32_to_cpu(htt_stats_buf->mu_mimo_sch_failed)); + len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n", + le32_to_cpu(htt_stats_buf->mu_mimo_ppdu_posted)); + len += scnprintf(buf + len, buf_len - len, + "\nac_mu_mimo_sch_posted_per_group_index %u (SU) = %u\n", 0, + le32_to_cpu(htt_stats_buf->ac_mu_mimo_per_grp_sz[0])); + for (i = 1; i < ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS; i++) { + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_sch_posted_per_group_index %u ", i); + len += scnprintf(buf + len, buf_len - len, + "(TOTAL STREAMS = %u) = %u\n", i + 1, + le32_to_cpu(htt_stats_buf->ac_mu_mimo_per_grp_sz[i])); + } + + for (i = 0; i < ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS; i++) { + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_sch_posted_per_group_index %u ", + i + ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS); + len += scnprintf(buf + len, buf_len - len, + "(TOTAL STREAMS = %u) = %u\n", + i + ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS + 1, + le32_to_cpu(htt_stats_buf->ac_mu_mimo_grp_sz_ext[i])); + } + + len += scnprintf(buf + len, buf_len - len, + "\nax_mu_mimo_sch_posted_per_group_index %u (SU) = %u\n", 0, + le32_to_cpu(htt_stats_buf->ax_mu_mimo_per_grp_sz[0])); + for (i = 1; i < ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS; i++) { + len += scnprintf(buf + len, buf_len - len, + "ax_mu_mimo_sch_posted_per_group_index %u ", i); + len += scnprintf(buf + len, buf_len - len, + "(TOTAL STREAMS = %u) = %u\n", i + 1, + le32_to_cpu(htt_stats_buf->ax_mu_mimo_per_grp_sz[i])); + } + + len += scnprintf(buf + len, buf_len - len, + 
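+ /* Same shape as the ac/ax blocks above: group-size index 0 is the SU case, and index i presumably covers i + 1 total streams, which is why the loop below starts at 1 and labels each entry with i + 1. */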
"\nbe_mu_mimo_sch_posted_per_group_index %u (SU) = %u\n", 0, + le32_to_cpu(htt_stats_buf->be_mu_mimo_per_grp_sz[0])); + for (i = 1; i < ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS; i++) { + len += scnprintf(buf + len, buf_len - len, + "be_mu_mimo_sch_posted_per_group_index %u ", i); + len += scnprintf(buf + len, buf_len - len, + "(TOTAL STREAMS = %u) = %u\n", i + 1, + le32_to_cpu(htt_stats_buf->be_mu_mimo_per_grp_sz[i])); + } + + len += scnprintf(buf + len, buf_len - len, "\n11ac MU_MIMO SCH STATS:\n"); + for (i = 0; i < ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS; i++) { + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_sch_nusers_"); + len += scnprintf(buf + len, buf_len - len, "%u = %u\n", i, + le32_to_cpu(htt_stats_buf->ac_mu_mimo_sch_nusers[i])); + } + + len += scnprintf(buf + len, buf_len - len, "\n11ax MU_MIMO SCH STATS:\n"); + for (i = 0; i < ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS; i++) { + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_sch_nusers_"); + len += scnprintf(buf + len, buf_len - len, "%u = %u\n", i, + le32_to_cpu(htt_stats_buf->ax_mu_mimo_sch_nusers[i])); + } + + len += scnprintf(buf + len, buf_len - len, "\n11be MU_MIMO SCH STATS:\n"); + for (i = 0; i < ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS; i++) { + len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_sch_nusers_"); + len += scnprintf(buf + len, buf_len - len, "%u = %u\n", i, + le32_to_cpu(htt_stats_buf->be_mu_mimo_sch_nusers[i])); + } + + len += scnprintf(buf + len, buf_len - len, "\n11ax OFDMA SCH STATS:\n"); + for (i = 0; i < ATH12K_HTT_TX_NUM_OFDMA_USER_STATS; i++) { + len += scnprintf(buf + len, buf_len - len, + "ax_ofdma_sch_nusers_%u = %u\n", i, + le32_to_cpu(htt_stats_buf->ax_ofdma_sch_nusers[i])); + len += scnprintf(buf + len, buf_len - len, + "ax_ul_ofdma_basic_sch_nusers_%u = %u\n", i, + le32_to_cpu(htt_stats_buf->ax_ul_ofdma_nusers[i])); + len += scnprintf(buf + len, buf_len - len, + "ax_ul_ofdma_bsr_sch_nusers_%u = %u\n", i, + le32_to_cpu(htt_stats_buf->ax_ul_ofdma_bsr_nusers[i])); + len += scnprintf(buf + len, buf_len - len, + "ax_ul_ofdma_bar_sch_nusers_%u = %u\n", i, + le32_to_cpu(htt_stats_buf->ax_ul_ofdma_bar_nusers[i])); + len += scnprintf(buf + len, buf_len - len, + "ax_ul_ofdma_brp_sch_nusers_%u = %u\n\n", i, + le32_to_cpu(htt_stats_buf->ax_ul_ofdma_brp_nusers[i])); + } + + len += scnprintf(buf + len, buf_len - len, "11ax UL MUMIMO SCH STATS:\n"); + for (i = 0; i < ATH12K_HTT_TX_NUM_UL_MUMIMO_USER_STATS; i++) { + len += scnprintf(buf + len, buf_len - len, + "ax_ul_mumimo_basic_sch_nusers_%u = %u\n", i, + le32_to_cpu(htt_stats_buf->ax_ul_mumimo_nusers[i])); + len += scnprintf(buf + len, buf_len - len, + "ax_ul_mumimo_brp_sch_nusers_%u = %u\n\n", i, + le32_to_cpu(htt_stats_buf->ax_ul_mumimo_brp_nusers[i])); + } + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_pdev_mumimo_grp_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_mumimo_grp_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + int j; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_MUMIMO_GRP_STATS:\n"); + len += print_array_to_buf(buf, len, + "dl_mumimo_grp_tputs_observed (per bin = 300 mbps)", + htt_stats_buf->dl_mumimo_grp_tputs, + ATH12K_HTT_STATS_MUMIMO_TPUT_NUM_BINS, "\n"); + len += print_array_to_buf(buf, len, "dl_mumimo_grp eligible", + htt_stats_buf->dl_mumimo_grp_eligible, + 
ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ, "\n"); + len += print_array_to_buf(buf, len, "dl_mumimo_grp_ineligible", + htt_stats_buf->dl_mumimo_grp_ineligible, + ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ, "\n"); + len += scnprintf(buf + len, buf_len - len, "dl_mumimo_grp_invalid:\n"); + for (j = 0; j < ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ; j++) { + len += scnprintf(buf + len, buf_len - len, "grp_id = %u", j); + len += print_array_to_buf(buf, len, "", + htt_stats_buf->dl_mumimo_grp_invalid, + ATH12K_HTT_STATS_MAX_INVALID_REASON_CODE, + "\n"); + } + + len += print_array_to_buf(buf, len, "ul_mumimo_grp_best_grp_size", + htt_stats_buf->ul_mumimo_grp_best_grp_size, + ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ, "\n"); + len += print_array_to_buf(buf, len, "ul_mumimo_grp_best_num_usrs = ", + htt_stats_buf->ul_mumimo_grp_best_usrs, + ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS, "\n"); + len += print_array_to_buf(buf, len, + "ul_mumimo_grp_tputs_observed (per bin = 300 mbps)", + htt_stats_buf->ul_mumimo_grp_tputs, + ATH12K_HTT_STATS_MUMIMO_TPUT_NUM_BINS, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_mpdu_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 user_index; + u32 tx_sched_mode; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + user_index = __le32_to_cpu(htt_stats_buf->user_index); + tx_sched_mode = __le32_to_cpu(htt_stats_buf->tx_sched_mode); + + if (tx_sched_mode == ATH12K_HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC) { + if (!user_index) + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_MU_MIMO_AC_MPDU_STATS:\n"); + + if (user_index < ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS) { + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_mpdus_queued_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdus_queued_usr)); + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_mpdus_tried_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdus_tried_usr)); + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_mpdus_failed_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdus_failed_usr)); + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_mpdus_requeued_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdus_requeued_usr)); + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_err_no_ba_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->err_no_ba_usr)); + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_mpdu_underrun_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdu_underrun_usr)); + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_ampdu_underrun_usr_%u = %u\n\n", + user_index, + le32_to_cpu(htt_stats_buf->ampdu_underrun_usr)); + } + } + + if (tx_sched_mode == ATH12K_HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX) { + if (!user_index) + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_MU_MIMO_AX_MPDU_STATS:\n"); + + if (user_index < ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS) { + len += scnprintf(buf + len, buf_len - len, + "ax_mu_mimo_mpdus_queued_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdus_queued_usr)); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_mimo_mpdus_tried_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdus_tried_usr)); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_mimo_mpdus_failed_usr_%u = %u\n", + 
user_index, + le32_to_cpu(htt_stats_buf->mpdus_failed_usr)); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_mimo_mpdus_requeued_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdus_requeued_usr)); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_mimo_err_no_ba_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->err_no_ba_usr)); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_mimo_mpdu_underrun_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdu_underrun_usr)); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_mimo_ampdu_underrun_usr_%u = %u\n\n", + user_index, + le32_to_cpu(htt_stats_buf->ampdu_underrun_usr)); + } + } + + if (tx_sched_mode == ATH12K_HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX) { + if (!user_index) + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_AX_MU_OFDMA_MPDU_STATS:\n"); + + if (user_index < ATH12K_HTT_TX_NUM_OFDMA_USER_STATS) { + len += scnprintf(buf + len, buf_len - len, + "ax_mu_ofdma_mpdus_queued_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdus_queued_usr)); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_ofdma_mpdus_tried_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdus_tried_usr)); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_ofdma_mpdus_failed_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdus_failed_usr)); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_ofdma_mpdus_requeued_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdus_requeued_usr)); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_ofdma_err_no_ba_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->err_no_ba_usr)); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_ofdma_mpdu_underrun_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdu_underrun_usr)); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_ofdma_ampdu_underrun_usr_%u = %u\n\n", + user_index, + le32_to_cpu(htt_stats_buf->ampdu_underrun_usr)); + } + } + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_pdev_cca_stats_hist_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_pdev_cca_stats_hist_v1_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_CCA_STATS_HIST_TLV :\n"); + len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n", + le32_to_cpu(htt_stats_buf->chan_num)); + len += scnprintf(buf + len, buf_len - len, "num_records = %u\n", + le32_to_cpu(htt_stats_buf->num_records)); + len += scnprintf(buf + len, buf_len - len, "valid_cca_counters_bitmap = 0x%x\n", + le32_to_cpu(htt_stats_buf->valid_cca_counters_bitmap)); + len += scnprintf(buf + len, buf_len - len, "collection_interval = %u\n\n", + le32_to_cpu(htt_stats_buf->collection_interval)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_pdev_stats_cca_counters_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_pdev_stats_cca_counters_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, + "HTT_PDEV_STATS_CCA_COUNTERS_TLV:(in usec)\n"); + len += scnprintf(buf + len, buf_len - len, "tx_frame_usec = %u\n", + 
le32_to_cpu(htt_stats_buf->tx_frame_usec)); + len += scnprintf(buf + len, buf_len - len, "rx_frame_usec = %u\n", + le32_to_cpu(htt_stats_buf->rx_frame_usec)); + len += scnprintf(buf + len, buf_len - len, "rx_clear_usec = %u\n", + le32_to_cpu(htt_stats_buf->rx_clear_usec)); + len += scnprintf(buf + len, buf_len - len, "my_rx_frame_usec = %u\n", + le32_to_cpu(htt_stats_buf->my_rx_frame_usec)); + len += scnprintf(buf + len, buf_len - len, "usec_cnt = %u\n", + le32_to_cpu(htt_stats_buf->usec_cnt)); + len += scnprintf(buf + len, buf_len - len, "med_rx_idle_usec = %u\n", + le32_to_cpu(htt_stats_buf->med_rx_idle_usec)); + len += scnprintf(buf + len, buf_len - len, "med_tx_idle_global_usec = %u\n", + le32_to_cpu(htt_stats_buf->med_tx_idle_global_usec)); + len += scnprintf(buf + len, buf_len - len, "cca_obss_usec = %u\n\n", + le32_to_cpu(htt_stats_buf->cca_obss_usec)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_pdev_obss_pd_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_pdev_obss_pd_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u8 i; + static const char *access_cat_names[ATH12K_HTT_NUM_AC_WMM] = {"best effort", + "background", + "video", "voice"}; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_OBSS_PD_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "num_spatial_reuse_tx = %u\n", + le32_to_cpu(htt_stats_buf->num_sr_tx_transmissions)); + len += scnprintf(buf + len, buf_len - len, + "num_spatial_reuse_opportunities = %u\n", + le32_to_cpu(htt_stats_buf->num_spatial_reuse_opportunities)); + len += scnprintf(buf + len, buf_len - len, "num_non_srg_opportunities = %u\n", + le32_to_cpu(htt_stats_buf->num_non_srg_opportunities)); + len += scnprintf(buf + len, buf_len - len, "num_non_srg_ppdu_tried = %u\n", + le32_to_cpu(htt_stats_buf->num_non_srg_ppdu_tried)); + len += scnprintf(buf + len, buf_len - len, "num_non_srg_ppdu_success = %u\n", + le32_to_cpu(htt_stats_buf->num_non_srg_ppdu_success)); + len += scnprintf(buf + len, buf_len - len, "num_srg_opportunities = %u\n", + le32_to_cpu(htt_stats_buf->num_srg_opportunities)); + len += scnprintf(buf + len, buf_len - len, "num_srg_ppdu_tried = %u\n", + le32_to_cpu(htt_stats_buf->num_srg_ppdu_tried)); + len += scnprintf(buf + len, buf_len - len, "num_srg_ppdu_success = %u\n", + le32_to_cpu(htt_stats_buf->num_srg_ppdu_success)); + len += scnprintf(buf + len, buf_len - len, "num_psr_opportunities = %u\n", + le32_to_cpu(htt_stats_buf->num_psr_opportunities)); + len += scnprintf(buf + len, buf_len - len, "num_psr_ppdu_tried = %u\n", + le32_to_cpu(htt_stats_buf->num_psr_ppdu_tried)); + len += scnprintf(buf + len, buf_len - len, "num_psr_ppdu_success = %u\n", + le32_to_cpu(htt_stats_buf->num_psr_ppdu_success)); + len += scnprintf(buf + len, buf_len - len, "min_duration_check_flush_cnt = %u\n", + le32_to_cpu(htt_stats_buf->num_obss_min_dur_check_flush_cnt)); + len += scnprintf(buf + len, buf_len - len, "sr_ppdu_abort_flush_cnt = %u\n\n", + le32_to_cpu(htt_stats_buf->num_sr_ppdu_abort_flush_cnt)); + + len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_OBSS_PD_PER_AC_STATS:\n"); + for (i = 0; i < ATH12K_HTT_NUM_AC_WMM; i++) { + len += scnprintf(buf + len, buf_len - len, "Access Category %u (%s)\n", + i, access_cat_names[i]); + len += scnprintf(buf + len, buf_len - len, + "num_non_srg_ppdu_tried = %u\n", + 
le32_to_cpu(htt_stats_buf->num_non_srg_tried_per_ac[i])); + len += scnprintf(buf + len, buf_len - len, + "num_non_srg_ppdu_success = %u\n", + le32_to_cpu(htt_stats_buf->num_non_srg_success_ac[i])); + len += scnprintf(buf + len, buf_len - len, "num_srg_ppdu_tried = %u\n", + le32_to_cpu(htt_stats_buf->num_srg_tried_per_ac[i])); + len += scnprintf(buf + len, buf_len - len, + "num_srg_ppdu_success = %u\n\n", + le32_to_cpu(htt_stats_buf->num_srg_success_per_ac[i])); + } + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_pdev_tx_rate_txbf_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_pdev_txrate_txbf_stats_tlv *htt_stats_buf = tag_buf; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 len = stats_req->buf_len; + u8 *buf = stats_req->buf; + u8 i; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, + "HTT_STATS_PDEV_TX_RATE_TXBF_STATS:\n"); + len += scnprintf(buf + len, buf_len - len, "Legacy OFDM Rates: 6 Mbps: %u, ", + le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[0])); + len += scnprintf(buf + len, buf_len - len, "9 Mbps: %u, 12 Mbps: %u, ", + le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[1]), + le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[2])); + len += scnprintf(buf + len, buf_len - len, "18 Mbps: %u\n", + le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[3])); + len += scnprintf(buf + len, buf_len - len, "24 Mbps: %u, 36 Mbps: %u, ", + le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[4]), + le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[5])); + len += scnprintf(buf + len, buf_len - len, "48 Mbps: %u, 54 Mbps: %u\n", + le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[6]), + le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[7])); + + len += print_array_to_buf(buf, len, "tx_ol_mcs", htt_stats_buf->tx_su_ol_mcs, + ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS, "\n"); + len += print_array_to_buf(buf, len, "tx_ibf_mcs", htt_stats_buf->tx_su_ibf_mcs, + ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS, "\n"); + len += print_array_to_buf(buf, len, "tx_txbf_mcs", htt_stats_buf->tx_su_txbf_mcs, + ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS, "\n"); + len += print_array_to_buf_index(buf, len, "tx_ol_nss", 1, + htt_stats_buf->tx_su_ol_nss, + ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, + "\n"); + len += print_array_to_buf_index(buf, len, "tx_ibf_nss", 1, + htt_stats_buf->tx_su_ibf_nss, + ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, + "\n"); + len += print_array_to_buf_index(buf, len, "tx_txbf_nss", 1, + htt_stats_buf->tx_su_txbf_nss, + ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, + "\n"); + len += print_array_to_buf(buf, len, "tx_ol_bw", htt_stats_buf->tx_su_ol_bw, + ATH12K_HTT_TXBF_NUM_BW_CNTRS, "\n"); + for (i = 0; i < ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES; i++) + len += print_array_to_buf(buf, len, i ? "quarter_tx_ol_bw" : + "half_tx_ol_bw", + htt_stats_buf->ol[i], + ATH12K_HTT_TXBF_NUM_BW_CNTRS, + "\n"); + + len += print_array_to_buf(buf, len, "tx_ibf_bw", htt_stats_buf->tx_su_ibf_bw, + ATH12K_HTT_TXBF_NUM_BW_CNTRS, "\n"); + for (i = 0; i < ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES; i++) + len += print_array_to_buf(buf, len, i ? "quarter_tx_ibf_bw" : + "half_tx_ibf_bw", + htt_stats_buf->ibf[i], + ATH12K_HTT_TXBF_NUM_BW_CNTRS, + "\n"); + + len += print_array_to_buf(buf, len, "tx_txbf_bw", htt_stats_buf->tx_su_txbf_bw, + ATH12K_HTT_TXBF_NUM_BW_CNTRS, "\n"); + for (i = 0; i < ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES; i++) + len += print_array_to_buf(buf, len, i ? 
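+ /* Assuming ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES == 2, index 0 carries the half-BW counters and index 1 the quarter-BW counters, matching the ternary labels used here and in the ol/ibf loops above. */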
"quarter_tx_txbf_bw" : + "half_tx_txbf_bw", + htt_stats_buf->txbf[i], + ATH12K_HTT_TXBF_NUM_BW_CNTRS, + "\n"); + len += scnprintf(buf + len, buf_len - len, "\n"); + + len += scnprintf(buf + len, buf_len - len, + "HTT_STATS_PDEV_TXBF_FLAG_RETURN_STATS:\n"); + len += scnprintf(buf + len, buf_len - len, "TXBF_reason_code_stats: 0:%u, 1:%u,", + le32_to_cpu(htt_stats_buf->txbf_flag_set_mu_mode), + le32_to_cpu(htt_stats_buf->txbf_flag_set_final_status)); + len += scnprintf(buf + len, buf_len - len, " 2:%u, 3:%u, 4:%u, 5:%u, ", + le32_to_cpu(htt_stats_buf->txbf_flag_not_set_verified_txbf_mode), + le32_to_cpu(htt_stats_buf->txbf_flag_not_set_disable_p2p_access), + le32_to_cpu(htt_stats_buf->txbf_flag_not_set_max_nss_in_he160), + le32_to_cpu(htt_stats_buf->txbf_flag_not_set_disable_uldlofdma)); + len += scnprintf(buf + len, buf_len - len, "6:%u, 7:%u\n\n", + le32_to_cpu(htt_stats_buf->txbf_flag_not_set_mcs_threshold_val), + le32_to_cpu(htt_stats_buf->txbf_flag_not_set_final_status)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_txbf_ofdma_ax_ndpa_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_tlv *stats_buf = tag_buf; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 len = stats_req->buf_len; + u8 *buf = stats_req->buf; + u32 num_elements; + u8 i; + + if (tag_len < sizeof(*stats_buf)) + return; + + num_elements = le32_to_cpu(stats_buf->num_elems_ax_ndpa_arr); + + len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_NDPA_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndpa_queued ="); + for (i = 0; i < num_elements; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, + le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_queued)); + len--; + *(buf + len) = '\0'; + + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndpa_tried ="); + for (i = 0; i < num_elements; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, + le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_tried)); + len--; + *(buf + len) = '\0'; + + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndpa_flushed ="); + for (i = 0; i < num_elements; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, + le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_flush)); + len--; + *(buf + len) = '\0'; + + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndpa_err ="); + for (i = 0; i < num_elements; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, + le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_err)); + len--; + *(buf + len) = '\0'; + + len += scnprintf(buf + len, buf_len - len, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_txbf_ofdma_ax_ndp_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_txbf_ofdma_ax_ndp_stats_tlv *stats_buf = tag_buf; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 len = stats_req->buf_len; + u8 *buf = stats_req->buf; + u32 num_elements; + u8 i; + + if (tag_len < sizeof(*stats_buf)) + return; + + num_elements = le32_to_cpu(stats_buf->num_elems_ax_ndp_arr); + + len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_NDP_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndp_queued ="); + for (i = 0; i < num_elements; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, + le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_queued)); + len--; + *(buf + len) = '\0'; + + len += scnprintf(buf + 
len, buf_len - len, "\nax_ofdma_ndp_tried ="); + for (i = 0; i < num_elements; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, + le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_tried)); + len--; + *(buf + len) = '\0'; + + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndp_flushed ="); + for (i = 0; i < num_elements; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, + le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_flush)); + len--; + *(buf + len) = '\0'; + + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndp_err ="); + for (i = 0; i < num_elements; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, + le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_err)); + len--; + *(buf + len) = '\0'; + + len += scnprintf(buf + len, buf_len - len, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_txbf_ofdma_ax_brp_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_txbf_ofdma_ax_brp_stats_tlv *stats_buf = tag_buf; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 len = stats_req->buf_len; + u8 *buf = stats_req->buf; + u32 num_elements; + u8 i; + + if (tag_len < sizeof(*stats_buf)) + return; + + num_elements = le32_to_cpu(stats_buf->num_elems_ax_brp_arr); + + len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_BRP_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "ax_ofdma_brpoll_queued ="); + for (i = 0; i < num_elements; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, + le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_queued)); + len--; + *(buf + len) = '\0'; + + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brpoll_tied ="); + for (i = 0; i < num_elements; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, + le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_tried)); + len--; + *(buf + len) = '\0'; + + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brpoll_flushed ="); + for (i = 0; i < num_elements; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, + le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_flushed)); + len--; + *(buf + len) = '\0'; + + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brp_err ="); + for (i = 0; i < num_elements; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, + le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_err)); + len--; + *(buf + len) = '\0'; + + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brp_err_num_cbf_rcvd ="); + for (i = 0; i < num_elements; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, + le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_num_cbf_rcvd)); + len--; + *(buf + len) = '\0'; + + len += scnprintf(buf + len, buf_len - len, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_txbf_ofdma_ax_steer_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_txbf_ofdma_ax_steer_stats_tlv *stats_buf = tag_buf; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 len = stats_req->buf_len; + u8 *buf = stats_req->buf; + u32 num_elements; + u8 i; + + if (tag_len < sizeof(*stats_buf)) + return; + + num_elements = le32_to_cpu(stats_buf->num_elems_ax_steer_arr); + + len += scnprintf(buf + len, buf_len - len, + "HTT_TXBF_OFDMA_AX_STEER_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "ax_ofdma_num_ppdu_steer ="); + for (i = 0; i < num_elements; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, + 
le32_to_cpu(stats_buf->ax_steer[i].num_ppdu_steer)); + len--; + *(buf + len) = '\0'; + + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_num_usrs_prefetch ="); + for (i = 0; i < num_elements; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, + le32_to_cpu(stats_buf->ax_steer[i].num_usr_prefetch)); + len--; + *(buf + len) = '\0'; + + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_num_usrs_sound ="); + for (i = 0; i < num_elements; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, + le32_to_cpu(stats_buf->ax_steer[i].num_usr_sound)); + len--; + *(buf + len) = '\0'; + + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_num_usrs_force_sound ="); + for (i = 0; i < num_elements; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, + le32_to_cpu(stats_buf->ax_steer[i].num_usr_force_sound)); + len--; + *(buf + len) = '\0'; + + len += scnprintf(buf + len, buf_len - len, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_txbf_ofdma_ax_steer_mpdu_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_txbf_ofdma_ax_steer_mpdu_stats_tlv *stats_buf = tag_buf; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 len = stats_req->buf_len; + u8 *buf = stats_req->buf; + + if (tag_len < sizeof(*stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, + "HTT_TXBF_OFDMA_AX_STEER_MPDU_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "rbo_steer_mpdus_tried = %u\n", + le32_to_cpu(stats_buf->ax_ofdma_rbo_steer_mpdus_tried)); + len += scnprintf(buf + len, buf_len - len, "rbo_steer_mpdus_failed = %u\n", + le32_to_cpu(stats_buf->ax_ofdma_rbo_steer_mpdus_failed)); + len += scnprintf(buf + len, buf_len - len, "sifs_steer_mpdus_tried = %u\n", + le32_to_cpu(stats_buf->ax_ofdma_sifs_steer_mpdus_tried)); + len += scnprintf(buf + len, buf_len - len, "sifs_steer_mpdus_failed = %u\n\n", + le32_to_cpu(stats_buf->ax_ofdma_sifs_steer_mpdus_failed)); + + stats_req->buf_len = len; +} + +static void ath12k_htt_print_dlpager_entry(const struct ath12k_htt_pgs_info *pg_info, + int idx, char *str_buf) +{ + u64 page_timestamp; + u16 index = 0; + + page_timestamp = ath12k_le32hilo_to_u64(pg_info->ts_msb, pg_info->ts_lsb); + + index += snprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index, + "Index - %u ; Page Number - %u ; ", + idx, le32_to_cpu(pg_info->page_num)); + index += snprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index, + "Num of pages - %u ; Timestamp - %lluus\n", + le32_to_cpu(pg_info->num_pgs), page_timestamp); +} + +static void +ath12k_htt_print_dlpager_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_dl_pager_stats_tlv *stat_buf = tag_buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 dword_lock, dword_unlock; + int i; + u8 *buf = stats_req->buf; + u8 pg_locked; + u8 pg_unlock; + char str_buf[ATH12K_HTT_MAX_STRING_LEN] = {0}; + + if (tag_len < sizeof(*stat_buf)) + return; + + dword_lock = le32_get_bits(stat_buf->info2, + ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO2); + dword_unlock = le32_get_bits(stat_buf->info2, + ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO2); + + pg_locked = ATH12K_HTT_STATS_PAGE_LOCKED; + pg_unlock = ATH12K_HTT_STATS_PAGE_UNLOCKED; + + len += scnprintf(buf + len, buf_len - len, "HTT_DLPAGER_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "ASYNC locked pages = %u\n", + le32_get_bits(stat_buf->info0, + 
ATH12K_HTT_DLPAGER_ASYNC_LOCK_PG_CNT_INFO0)); + len += scnprintf(buf + len, buf_len - len, "SYNC locked pages = %u\n", + le32_get_bits(stat_buf->info0, + ATH12K_HTT_DLPAGER_SYNC_LOCK_PG_CNT_INFO0)); + len += scnprintf(buf + len, buf_len - len, "Total locked pages = %u\n", + le32_get_bits(stat_buf->info1, + ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO1)); + len += scnprintf(buf + len, buf_len - len, "Total free pages = %u\n", + le32_get_bits(stat_buf->info1, + ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO1)); + + len += scnprintf(buf + len, buf_len - len, "\nLOCKED PAGES HISTORY\n"); + len += scnprintf(buf + len, buf_len - len, "last_locked_page_idx = %u\n", + dword_lock ? dword_lock - 1 : (ATH12K_PAGER_MAX - 1)); + + for (i = 0; i < ATH12K_PAGER_MAX; i++) { + memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN); + ath12k_htt_print_dlpager_entry(&stat_buf->pgs_info[pg_locked][i], + i, str_buf); + len += scnprintf(buf + len, buf_len - len, "%s", str_buf); + } + + len += scnprintf(buf + len, buf_len - len, "\nUNLOCKED PAGES HISTORY\n"); + len += scnprintf(buf + len, buf_len - len, "last_unlocked_page_idx = %u\n", + dword_unlock ? dword_unlock - 1 : ATH12K_PAGER_MAX - 1); + + for (i = 0; i < ATH12K_PAGER_MAX; i++) { + memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN); + ath12k_htt_print_dlpager_entry(&stat_buf->pgs_info[pg_unlock][i], + i, str_buf); + len += scnprintf(buf + len, buf_len - len, "%s", str_buf); + } + + len += scnprintf(buf + len, buf_len - len, "\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_phy_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_phy_stats_tlv *htt_stats_buf = tag_buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u8 *buf = stats_req->buf, i; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_PHY_STATS_TLV:\n"); + for (i = 0; i < ATH12K_HTT_STATS_MAX_CHAINS; i++) + len += scnprintf(buf + len, buf_len - len, "bdf_nf_chain[%d] = %d\n", + i, a_sle32_to_cpu(htt_stats_buf->nf_chain[i])); + for (i = 0; i < ATH12K_HTT_STATS_MAX_CHAINS; i++) + len += scnprintf(buf + len, buf_len - len, "runtime_nf_chain[%d] = %d\n", + i, a_sle32_to_cpu(htt_stats_buf->runtime_nf_chain[i])); + len += scnprintf(buf + len, buf_len - len, "false_radar_cnt = %u / %u (mins)\n", + le32_to_cpu(htt_stats_buf->false_radar_cnt), + le32_to_cpu(htt_stats_buf->fw_run_time)); + len += scnprintf(buf + len, buf_len - len, "radar_cs_cnt = %u\n", + le32_to_cpu(htt_stats_buf->radar_cs_cnt)); + len += scnprintf(buf + len, buf_len - len, "ani_level = %d\n\n", + a_sle32_to_cpu(htt_stats_buf->ani_level)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_phy_counters_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_phy_counters_tlv *htt_stats_buf = tag_buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u8 *buf = stats_req->buf; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_PHY_COUNTERS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "rx_ofdma_timing_err_cnt = %u\n", + le32_to_cpu(htt_stats_buf->rx_ofdma_timing_err_cnt)); + len += scnprintf(buf + len, buf_len - len, "rx_cck_fail_cnt = %u\n", + le32_to_cpu(htt_stats_buf->rx_cck_fail_cnt)); + len += scnprintf(buf + len, buf_len - len, "mactx_abort_cnt = %u\n", + le32_to_cpu(htt_stats_buf->mactx_abort_cnt)); + len += scnprintf(buf + len, buf_len 
- len, "macrx_abort_cnt = %u\n", + le32_to_cpu(htt_stats_buf->macrx_abort_cnt)); + len += scnprintf(buf + len, buf_len - len, "phytx_abort_cnt = %u\n", + le32_to_cpu(htt_stats_buf->phytx_abort_cnt)); + len += scnprintf(buf + len, buf_len - len, "phyrx_abort_cnt = %u\n", + le32_to_cpu(htt_stats_buf->phyrx_abort_cnt)); + len += scnprintf(buf + len, buf_len - len, "phyrx_defer_abort_cnt = %u\n", + le32_to_cpu(htt_stats_buf->phyrx_defer_abort_cnt)); + len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_lstf_event_cnt = %u\n", + le32_to_cpu(htt_stats_buf->rx_gain_adj_lstf_event_cnt)); + len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_non_legacy_cnt = %u\n", + le32_to_cpu(htt_stats_buf->rx_gain_adj_non_legacy_cnt)); + len += print_array_to_buf(buf, len, "rx_pkt_cnt", htt_stats_buf->rx_pkt_cnt, + ATH12K_HTT_MAX_RX_PKT_CNT, "\n"); + len += print_array_to_buf(buf, len, "rx_pkt_crc_pass_cnt", + htt_stats_buf->rx_pkt_crc_pass_cnt, + ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT, "\n"); + len += print_array_to_buf(buf, len, "per_blk_err_cnt", + htt_stats_buf->per_blk_err_cnt, + ATH12K_HTT_MAX_PER_BLK_ERR_CNT, "\n"); + len += print_array_to_buf(buf, len, "rx_ota_err_cnt", + htt_stats_buf->rx_ota_err_cnt, + ATH12K_HTT_MAX_RX_OTA_ERR_CNT, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_phy_reset_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_phy_reset_stats_tlv *htt_stats_buf = tag_buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u8 *buf = stats_req->buf; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n", + le32_to_cpu(htt_stats_buf->pdev_id)); + len += scnprintf(buf + len, buf_len - len, "chan_mhz = %u\n", + le32_to_cpu(htt_stats_buf->chan_mhz)); + len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq1 = %u\n", + le32_to_cpu(htt_stats_buf->chan_band_center_freq1)); + len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq2 = %u\n", + le32_to_cpu(htt_stats_buf->chan_band_center_freq2)); + len += scnprintf(buf + len, buf_len - len, "chan_phy_mode = %u\n", + le32_to_cpu(htt_stats_buf->chan_phy_mode)); + len += scnprintf(buf + len, buf_len - len, "chan_flags = 0x%0x\n", + le32_to_cpu(htt_stats_buf->chan_flags)); + len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n", + le32_to_cpu(htt_stats_buf->chan_num)); + len += scnprintf(buf + len, buf_len - len, "reset_cause = 0x%0x\n", + le32_to_cpu(htt_stats_buf->reset_cause)); + len += scnprintf(buf + len, buf_len - len, "prev_reset_cause = 0x%0x\n", + le32_to_cpu(htt_stats_buf->prev_reset_cause)); + len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_src = 0x%0x\n", + le32_to_cpu(htt_stats_buf->phy_warm_reset_src)); + len += scnprintf(buf + len, buf_len - len, "rx_gain_tbl_mode = %d\n", + le32_to_cpu(htt_stats_buf->rx_gain_tbl_mode)); + len += scnprintf(buf + len, buf_len - len, "xbar_val = 0x%0x\n", + le32_to_cpu(htt_stats_buf->xbar_val)); + len += scnprintf(buf + len, buf_len - len, "force_calibration = %u\n", + le32_to_cpu(htt_stats_buf->force_calibration)); + len += scnprintf(buf + len, buf_len - len, "phyrf_mode = %u\n", + le32_to_cpu(htt_stats_buf->phyrf_mode)); + len += scnprintf(buf + len, buf_len - len, "phy_homechan = %u\n", + le32_to_cpu(htt_stats_buf->phy_homechan)); + len += scnprintf(buf + len, buf_len - len, "phy_tx_ch_mask = 0x%0x\n", + 
le32_to_cpu(htt_stats_buf->phy_tx_ch_mask)); + len += scnprintf(buf + len, buf_len - len, "phy_rx_ch_mask = 0x%0x\n", + le32_to_cpu(htt_stats_buf->phy_rx_ch_mask)); + len += scnprintf(buf + len, buf_len - len, "phybb_ini_mask = 0x%0x\n", + le32_to_cpu(htt_stats_buf->phybb_ini_mask)); + len += scnprintf(buf + len, buf_len - len, "phyrf_ini_mask = 0x%0x\n", + le32_to_cpu(htt_stats_buf->phyrf_ini_mask)); + len += scnprintf(buf + len, buf_len - len, "phy_dfs_en_mask = 0x%0x\n", + le32_to_cpu(htt_stats_buf->phy_dfs_en_mask)); + len += scnprintf(buf + len, buf_len - len, "phy_sscan_en_mask = 0x%0x\n", + le32_to_cpu(htt_stats_buf->phy_sscan_en_mask)); + len += scnprintf(buf + len, buf_len - len, "phy_synth_sel_mask = 0x%0x\n", + le32_to_cpu(htt_stats_buf->phy_synth_sel_mask)); + len += scnprintf(buf + len, buf_len - len, "phy_adfs_freq = %u\n", + le32_to_cpu(htt_stats_buf->phy_adfs_freq)); + len += scnprintf(buf + len, buf_len - len, "cck_fir_settings = 0x%0x\n", + le32_to_cpu(htt_stats_buf->cck_fir_settings)); + len += scnprintf(buf + len, buf_len - len, "phy_dyn_pri_chan = %u\n", + le32_to_cpu(htt_stats_buf->phy_dyn_pri_chan)); + len += scnprintf(buf + len, buf_len - len, "cca_thresh = 0x%0x\n", + le32_to_cpu(htt_stats_buf->cca_thresh)); + len += scnprintf(buf + len, buf_len - len, "dyn_cca_status = %u\n", + le32_to_cpu(htt_stats_buf->dyn_cca_status)); + len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_hw = 0x%x\n", + le32_to_cpu(htt_stats_buf->rxdesense_thresh_hw)); + len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_sw = 0x%x\n\n", + le32_to_cpu(htt_stats_buf->rxdesense_thresh_sw)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_phy_reset_counters_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_phy_reset_counters_tlv *htt_stats_buf = tag_buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u8 *buf = stats_req->buf; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_COUNTERS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n", + le32_to_cpu(htt_stats_buf->pdev_id)); + len += scnprintf(buf + len, buf_len - len, "cf_active_low_fail_cnt = %u\n", + le32_to_cpu(htt_stats_buf->cf_active_low_fail_cnt)); + len += scnprintf(buf + len, buf_len - len, "cf_active_low_pass_cnt = %u\n", + le32_to_cpu(htt_stats_buf->cf_active_low_pass_cnt)); + len += scnprintf(buf + len, buf_len - len, "phy_off_through_vreg_cnt = %u\n", + le32_to_cpu(htt_stats_buf->phy_off_through_vreg_cnt)); + len += scnprintf(buf + len, buf_len - len, "force_calibration_cnt = %u\n", + le32_to_cpu(htt_stats_buf->force_calibration_cnt)); + len += scnprintf(buf + len, buf_len - len, "rf_mode_switch_phy_off_cnt = %u\n", + le32_to_cpu(htt_stats_buf->rf_mode_switch_phy_off_cnt)); + len += scnprintf(buf + len, buf_len - len, "temperature_recal_cnt = %u\n\n", + le32_to_cpu(htt_stats_buf->temperature_recal_cnt)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_phy_tpc_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_phy_tpc_stats_tlv *htt_stats_buf = tag_buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u8 *buf = stats_req->buf; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_PHY_TPC_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n", + 
le32_to_cpu(htt_stats_buf->pdev_id)); + len += scnprintf(buf + len, buf_len - len, "tx_power_scale = %u\n", + le32_to_cpu(htt_stats_buf->tx_power_scale)); + len += scnprintf(buf + len, buf_len - len, "tx_power_scale_db = %u\n", + le32_to_cpu(htt_stats_buf->tx_power_scale_db)); + len += scnprintf(buf + len, buf_len - len, "min_negative_tx_power = %d\n", + le32_to_cpu(htt_stats_buf->min_negative_tx_power)); + len += scnprintf(buf + len, buf_len - len, "reg_ctl_domain = %u\n", + le32_to_cpu(htt_stats_buf->reg_ctl_domain)); + len += scnprintf(buf + len, buf_len - len, "twice_max_rd_power = %u\n", + le32_to_cpu(htt_stats_buf->twice_max_rd_power)); + len += scnprintf(buf + len, buf_len - len, "max_tx_power = %u\n", + le32_to_cpu(htt_stats_buf->max_tx_power)); + len += scnprintf(buf + len, buf_len - len, "home_max_tx_power = %u\n", + le32_to_cpu(htt_stats_buf->home_max_tx_power)); + len += scnprintf(buf + len, buf_len - len, "psd_power = %d\n", + le32_to_cpu(htt_stats_buf->psd_power)); + len += scnprintf(buf + len, buf_len - len, "eirp_power = %u\n", + le32_to_cpu(htt_stats_buf->eirp_power)); + len += scnprintf(buf + len, buf_len - len, "power_type_6ghz = %u\n", + le32_to_cpu(htt_stats_buf->power_type_6ghz)); + len += print_array_to_buf(buf, len, "max_reg_allowed_power", + htt_stats_buf->max_reg_allowed_power, + ATH12K_HTT_STATS_MAX_CHAINS, "\n"); + len += print_array_to_buf(buf, len, "max_reg_allowed_power_6ghz", + htt_stats_buf->max_reg_allowed_power_6ghz, + ATH12K_HTT_STATS_MAX_CHAINS, "\n"); + len += print_array_to_buf(buf, len, "sub_band_cfreq", + htt_stats_buf->sub_band_cfreq, + ATH12K_HTT_MAX_CH_PWR_INFO_SIZE, "\n"); + len += print_array_to_buf(buf, len, "sub_band_txpower", + htt_stats_buf->sub_band_txpower, + ATH12K_HTT_MAX_CH_PWR_INFO_SIZE, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_soc_txrx_stats_common_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_t2h_soc_txrx_stats_common_tlv *htt_stats_buf = tag_buf; + u64 drop_count; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u8 *buf = stats_req->buf; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + drop_count = ath12k_le32hilo_to_u64(htt_stats_buf->inv_peers_msdu_drop_count_hi, + htt_stats_buf->inv_peers_msdu_drop_count_lo); + + len += scnprintf(buf + len, buf_len - len, "HTT_SOC_COMMON_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "soc_drop_count = %llu\n\n", + drop_count); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_per_rate_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_per_rate_stats_tlv *stats_buf = tag_buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 ru_size_cnt = 0; + u32 rc_mode, ru_type; + u8 *buf = stats_req->buf, i; + const char *mode_prefix; + + if (tag_len < sizeof(*stats_buf)) + return; + + rc_mode = le32_to_cpu(stats_buf->rc_mode); + ru_type = le32_to_cpu(stats_buf->ru_type); + + switch (rc_mode) { + case ATH12K_HTT_STATS_RC_MODE_DLSU: + len += scnprintf(buf + len, buf_len - len, "HTT_TX_PER_STATS:\n"); + len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_SU:\n"); + mode_prefix = "su"; + break; + case ATH12K_HTT_STATS_RC_MODE_DLMUMIMO: + len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_DL_MUMIMO:\n"); + mode_prefix = "mu"; + break; + case ATH12K_HTT_STATS_RC_MODE_DLOFDMA: + len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_DL_OFDMA:\n"); + 
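+ /* For the OFDMA modes the "PER per RU" table length tracks ru_type: single-RU-only presumably selects the 11ax RU-size counter list, while single-and-multi-RU selects the longer 11be list that also covers MRU combinations; any other ru_type leaves ru_size_cnt at 0 so the RU section below is skipped. */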
mode_prefix = "ofdma"; + if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY) + ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS; + else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU) + ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS; + break; + case ATH12K_HTT_STATS_RC_MODE_ULMUMIMO: + len += scnprintf(buf + len, buf_len - len, "HTT_RX_PER_STATS:\n"); + len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_UL_MUMIMO:\n"); + mode_prefix = "ulmu"; + break; + case ATH12K_HTT_STATS_RC_MODE_ULOFDMA: + len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_UL_OFDMA:\n"); + mode_prefix = "ulofdma"; + if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY) + ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS; + else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU) + ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS; + break; + default: + return; + } + + len += scnprintf(buf + len, buf_len - len, "\nPER per BW:\n"); + if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA || + rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO) + len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ", + mode_prefix); + else + len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ", + mode_prefix); + for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u ", i, + le32_to_cpu(stats_buf->per_bw[i].ppdus_tried)); + len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i, + le32_to_cpu(stats_buf->per_bw320.ppdus_tried)); + + if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA || + rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO) + len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ", + mode_prefix); + else + len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ", + mode_prefix); + for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u ", i, + le32_to_cpu(stats_buf->per_bw[i].ppdus_ack_failed)); + len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i, + le32_to_cpu(stats_buf->per_bw320.ppdus_ack_failed)); + + len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix); + for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u ", i, + le32_to_cpu(stats_buf->per_bw[i].mpdus_tried)); + len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i, + le32_to_cpu(stats_buf->per_bw320.mpdus_tried)); + + len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix); + for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u", i, + le32_to_cpu(stats_buf->per_bw[i].mpdus_failed)); + len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i, + le32_to_cpu(stats_buf->per_bw320.mpdus_failed)); + + len += scnprintf(buf + len, buf_len - len, "\nPER per NSS:\n"); + if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA || + rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO) + len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ", + mode_prefix); + else + len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ", + mode_prefix); + for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1, + le32_to_cpu(stats_buf->per_nss[i].ppdus_tried)); + len += scnprintf(buf + len, buf_len - len, "\n"); + + if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA || + rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO) + len += scnprintf(buf + len, 
buf_len - len, "non_data_ppdus_%s = ", + mode_prefix); + else + len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ", + mode_prefix); + for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1, + le32_to_cpu(stats_buf->per_nss[i].ppdus_ack_failed)); + len += scnprintf(buf + len, buf_len - len, "\n"); + + len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix); + for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1, + le32_to_cpu(stats_buf->per_nss[i].mpdus_tried)); + len += scnprintf(buf + len, buf_len - len, "\n"); + + len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix); + for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1, + le32_to_cpu(stats_buf->per_nss[i].mpdus_failed)); + len += scnprintf(buf + len, buf_len - len, "\n"); + + len += scnprintf(buf + len, buf_len - len, "\nPER per MCS:\n"); + if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA || + rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO) + len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ", + mode_prefix); + else + len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ", + mode_prefix); + for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u ", i, + le32_to_cpu(stats_buf->per_mcs[i].ppdus_tried)); + len += scnprintf(buf + len, buf_len - len, "\n"); + + if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA || + rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO) + len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ", + mode_prefix); + else + len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ", + mode_prefix); + for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u ", i, + le32_to_cpu(stats_buf->per_mcs[i].ppdus_ack_failed)); + len += scnprintf(buf + len, buf_len - len, "\n"); + + len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix); + for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u ", i, + le32_to_cpu(stats_buf->per_mcs[i].mpdus_tried)); + len += scnprintf(buf + len, buf_len - len, "\n"); + + len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix); + for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++) + len += scnprintf(buf + len, buf_len - len, " %u:%u ", i, + le32_to_cpu(stats_buf->per_mcs[i].mpdus_failed)); + len += scnprintf(buf + len, buf_len - len, "\n"); + + if ((rc_mode == ATH12K_HTT_STATS_RC_MODE_DLOFDMA || + rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA) && + ru_type != ATH12K_HTT_STATS_RU_TYPE_INVALID) { + len += scnprintf(buf + len, buf_len - len, "\nPER per RU:\n"); + + if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA) + len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ", + mode_prefix); + else + len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ", + mode_prefix); + for (i = 0; i < ru_size_cnt; i++) + len += scnprintf(buf + len, buf_len - len, " %s:%u ", + ath12k_tx_ru_size_to_str(ru_type, i), + le32_to_cpu(stats_buf->ru[i].ppdus_tried)); + len += scnprintf(buf + len, buf_len - len, "\n"); + + if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA) + len += scnprintf(buf + len, buf_len - len, + "non_data_ppdus_%s = ", mode_prefix); + else + len += scnprintf(buf + 
len, buf_len - len, + "ppdus_ack_failed_%s = ", mode_prefix); + for (i = 0; i < ru_size_cnt; i++) + len += scnprintf(buf + len, buf_len - len, " %s:%u ", + ath12k_tx_ru_size_to_str(ru_type, i), + le32_to_cpu(stats_buf->ru[i].ppdus_ack_failed)); + len += scnprintf(buf + len, buf_len - len, "\n"); + + len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", + mode_prefix); + for (i = 0; i < ru_size_cnt; i++) + len += scnprintf(buf + len, buf_len - len, " %s:%u ", + ath12k_tx_ru_size_to_str(ru_type, i), + le32_to_cpu(stats_buf->ru[i].mpdus_tried)); + len += scnprintf(buf + len, buf_len - len, "\n"); + + len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", + mode_prefix); + for (i = 0; i < ru_size_cnt; i++) + len += scnprintf(buf + len, buf_len - len, " %s:%u ", + ath12k_tx_ru_size_to_str(ru_type, i), + le32_to_cpu(stats_buf->ru[i].mpdus_failed)); + len += scnprintf(buf + len, buf_len - len, "\n\n"); + } + + if (rc_mode == ATH12K_HTT_STATS_RC_MODE_DLMUMIMO) { + len += scnprintf(buf + len, buf_len - len, "\nlast_probed_bw = %u\n", + le32_to_cpu(stats_buf->last_probed_bw)); + len += scnprintf(buf + len, buf_len - len, "last_probed_nss = %u\n", + le32_to_cpu(stats_buf->last_probed_nss)); + len += scnprintf(buf + len, buf_len - len, "last_probed_mcs = %u\n", + le32_to_cpu(stats_buf->last_probed_mcs)); + len += print_array_to_buf(buf, len, "MU Probe count per RC MODE", + stats_buf->probe_cnt, + ATH12K_HTT_RC_MODE_2D_COUNT, "\n\n"); + } + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_ast_entry_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_ast_entry_tlv *htt_stats_buf = tag_buf; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 len = stats_req->buf_len; + u8 *buf = stats_req->buf; + u32 mac_addr_l32; + u32 mac_addr_h16; + u32 ast_info; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_addr_l32 = le32_to_cpu(htt_stats_buf->mac_addr.mac_addr_l32); + mac_addr_h16 = le32_to_cpu(htt_stats_buf->mac_addr.mac_addr_h16); + ast_info = le32_to_cpu(htt_stats_buf->info); + + len += scnprintf(buf + len, buf_len - len, "HTT_AST_ENTRY_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "ast_index = %u\n", + le32_to_cpu(htt_stats_buf->ast_index)); + len += scnprintf(buf + len, buf_len - len, + "mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", + u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_0), + u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_1), + u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_2), + u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_3), + u32_get_bits(mac_addr_h16, ATH12K_HTT_MAC_ADDR_H16_0), + u32_get_bits(mac_addr_h16, ATH12K_HTT_MAC_ADDR_H16_1)); + + len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n", + le32_to_cpu(htt_stats_buf->sw_peer_id)); + len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n", + u32_get_bits(ast_info, ATH12K_HTT_AST_PDEV_ID_INFO)); + len += scnprintf(buf + len, buf_len - len, "vdev_id = %u\n", + u32_get_bits(ast_info, ATH12K_HTT_AST_VDEV_ID_INFO)); + len += scnprintf(buf + len, buf_len - len, "next_hop = %u\n", + u32_get_bits(ast_info, ATH12K_HTT_AST_NEXT_HOP_INFO)); + len += scnprintf(buf + len, buf_len - len, "mcast = %u\n", + u32_get_bits(ast_info, ATH12K_HTT_AST_MCAST_INFO)); + len += scnprintf(buf + len, buf_len - len, "monitor_direct = %u\n", + u32_get_bits(ast_info, ATH12K_HTT_AST_MONITOR_DIRECT_INFO)); + len += scnprintf(buf + len, buf_len - len, "mesh_sta = %u\n", + u32_get_bits(ast_info, ATH12K_HTT_AST_MESH_STA_INFO)); + len += 
scnprintf(buf + len, buf_len - len, "mec = %u\n", + u32_get_bits(ast_info, ATH12K_HTT_AST_MEC_INFO)); + len += scnprintf(buf + len, buf_len - len, "intra_bss = %u\n\n", + u32_get_bits(ast_info, ATH12K_HTT_AST_INTRA_BSS_INFO)); + + stats_req->buf_len = len; +} + +static const char* +ath12k_htt_get_punct_dir_type_str(enum ath12k_htt_stats_direction direction) +{ + switch (direction) { + case ATH12K_HTT_STATS_DIRECTION_TX: + return "tx"; + case ATH12K_HTT_STATS_DIRECTION_RX: + return "rx"; + default: + return "unknown"; + } +} + +static const char* +ath12k_htt_get_punct_ppdu_type_str(enum ath12k_htt_stats_ppdu_type ppdu_type) +{ + switch (ppdu_type) { + case ATH12K_HTT_STATS_PPDU_TYPE_MODE_SU: + return "su"; + case ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_MIMO: + return "dl_mu_mimo"; + case ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_MIMO: + return "ul_mu_mimo"; + case ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_OFDMA: + return "dl_mu_ofdma"; + case ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_OFDMA: + return "ul_mu_ofdma"; + default: + return "unknown"; + } +} + +static const char* +ath12k_htt_get_punct_pream_type_str(enum ath12k_htt_stats_param_type pream_type) +{ + switch (pream_type) { + case ATH12K_HTT_STATS_PREAM_OFDM: + return "ofdm"; + case ATH12K_HTT_STATS_PREAM_CCK: + return "cck"; + case ATH12K_HTT_STATS_PREAM_HT: + return "ht"; + case ATH12K_HTT_STATS_PREAM_VHT: + return "ac"; + case ATH12K_HTT_STATS_PREAM_HE: + return "ax"; + case ATH12K_HTT_STATS_PREAM_EHT: + return "be"; + default: + return "unknown"; + } +} + +static void +ath12k_htt_print_puncture_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_pdev_puncture_stats_tlv *stats_buf = tag_buf; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 len = stats_req->buf_len; + u8 *buf = stats_req->buf; + const char *direction; + const char *ppdu_type; + const char *preamble; + u32 mac_id__word; + u32 subband_limit; + u8 i; + + if (tag_len < sizeof(*stats_buf)) + return; + + mac_id__word = le32_to_cpu(stats_buf->mac_id__word); + subband_limit = min(le32_to_cpu(stats_buf->subband_cnt), + ATH12K_HTT_PUNCT_STATS_MAX_SUBBAND_CNT); + + direction = ath12k_htt_get_punct_dir_type_str(le32_to_cpu(stats_buf->direction)); + ppdu_type = ath12k_htt_get_punct_ppdu_type_str(le32_to_cpu(stats_buf->ppdu_type)); + preamble = ath12k_htt_get_punct_pream_type_str(le32_to_cpu(stats_buf->preamble)); + + len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_PUNCTURE_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id__word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, + "%s_%s_%s_last_used_pattern_mask = 0x%08x\n", + direction, preamble, ppdu_type, + le32_to_cpu(stats_buf->last_used_pattern_mask)); + + for (i = 0; i < subband_limit; i++) { + len += scnprintf(buf + len, buf_len - len, + "%s_%s_%s_num_subbands_used_cnt_%02d = %u\n", + direction, preamble, ppdu_type, i + 1, + le32_to_cpu(stats_buf->num_subbands_used_cnt[i])); + } + len += scnprintf(buf + len, buf_len - len, "\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_dmac_reset_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_dmac_reset_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u64 time; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_DMAC_RESET_STATS_TLV:\n"); + len += 
scnprintf(buf + len, buf_len - len, "reset_count = %u\n", + le32_to_cpu(htt_stats_buf->reset_count)); + time = ath12k_le32hilo_to_u64(htt_stats_buf->reset_time_hi_ms, + htt_stats_buf->reset_time_lo_ms); + len += scnprintf(buf + len, buf_len - len, "reset_time_ms = %llu\n", time); + time = ath12k_le32hilo_to_u64(htt_stats_buf->disengage_time_hi_ms, + htt_stats_buf->disengage_time_lo_ms); + len += scnprintf(buf + len, buf_len - len, "disengage_time_ms = %llu\n", time); + + time = ath12k_le32hilo_to_u64(htt_stats_buf->engage_time_hi_ms, + htt_stats_buf->engage_time_lo_ms); + len += scnprintf(buf + len, buf_len - len, "engage_time_ms = %llu\n", time); + + len += scnprintf(buf + len, buf_len - len, "disengage_count = %u\n", + le32_to_cpu(htt_stats_buf->disengage_count)); + len += scnprintf(buf + len, buf_len - len, "engage_count = %u\n", + le32_to_cpu(htt_stats_buf->engage_count)); + len += scnprintf(buf + len, buf_len - len, "drain_dest_ring_mask = 0x%x\n\n", + le32_to_cpu(htt_stats_buf->drain_dest_ring_mask)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_pdev_sched_algo_ofdma_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_pdev_sched_algo_ofdma_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_SCHED_ALGO_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += print_array_to_buf(buf, len, "rate_based_dlofdma_enabled_count", + htt_stats_buf->rate_based_dlofdma_enabled_cnt, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "rate_based_dlofdma_disabled_count", + htt_stats_buf->rate_based_dlofdma_disabled_cnt, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "rate_based_dlofdma_probing_count", + htt_stats_buf->rate_based_dlofdma_disabled_cnt, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "rate_based_dlofdma_monitoring_count", + htt_stats_buf->rate_based_dlofdma_monitor_cnt, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "chan_acc_lat_based_dlofdma_enabled_count", + htt_stats_buf->chan_acc_lat_based_dlofdma_enabled_cnt, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "chan_acc_lat_based_dlofdma_disabled_count", + htt_stats_buf->chan_acc_lat_based_dlofdma_disabled_cnt, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "chan_acc_lat_based_dlofdma_monitoring_count", + htt_stats_buf->chan_acc_lat_based_dlofdma_monitor_cnt, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "downgrade_to_dl_su_ru_alloc_fail", + htt_stats_buf->downgrade_to_dl_su_ru_alloc_fail, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "candidate_list_single_user_disable_ofdma", + htt_stats_buf->candidate_list_single_user_disable_ofdma, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "dl_cand_list_dropped_high_ul_qos_weight", + htt_stats_buf->dl_cand_list_dropped_high_ul_qos_weight, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "ax_dlofdma_disabled_due_to_pipelining", + htt_stats_buf->ax_dlofdma_disabled_due_to_pipelining, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += 
print_array_to_buf(buf, len, "dlofdma_disabled_su_only_eligible", + htt_stats_buf->dlofdma_disabled_su_only_eligible, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "dlofdma_disabled_consec_no_mpdus_tried", + htt_stats_buf->dlofdma_disabled_consec_no_mpdus_tried, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "dlofdma_disabled_consec_no_mpdus_success", + htt_stats_buf->dlofdma_disabled_consec_no_mpdus_success, + ATH12K_HTT_NUM_AC_WMM, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_pdev_rate_stats_be_ofdma_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_rate_stats_be_ofdma_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + u8 i; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_RATE_STATS_BE_OFDMA_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "be_ofdma_tx_ldpc = %u\n", + le32_to_cpu(htt_stats_buf->be_ofdma_tx_ldpc)); + len += print_array_to_buf(buf, len, "be_ofdma_tx_mcs", + htt_stats_buf->be_ofdma_tx_mcs, + ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS, "\n"); + len += print_array_to_buf(buf, len, "be_ofdma_eht_sig_mcs", + htt_stats_buf->be_ofdma_eht_sig_mcs, + ATH12K_HTT_TX_PDEV_NUM_EHT_SIG_MCS_CNTRS, "\n"); + len += scnprintf(buf + len, buf_len - len, "be_ofdma_tx_ru_size = "); + for (i = 0; i < ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS; i++) + len += scnprintf(buf + len, buf_len - len, " %s:%u ", + ath12k_htt_be_tx_rx_ru_size_to_str(i), + le32_to_cpu(htt_stats_buf->be_ofdma_tx_ru_size[i])); + len += scnprintf(buf + len, buf_len - len, "\n"); + len += print_array_to_buf_index(buf, len, "be_ofdma_tx_nss = ", 1, + htt_stats_buf->be_ofdma_tx_nss, + ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS, + "\n"); + len += print_array_to_buf(buf, len, "be_ofdma_tx_bw", + htt_stats_buf->be_ofdma_tx_bw, + ATH12K_HTT_TX_PDEV_NUM_BE_BW_CNTRS, "\n"); + for (i = 0; i < ATH12K_HTT_TX_PDEV_NUM_GI_CNTRS; i++) { + len += scnprintf(buf + len, buf_len - len, + "be_ofdma_tx_gi[%u]", i); + len += print_array_to_buf(buf, len, "", htt_stats_buf->gi[i], + ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS, "\n"); + } + len += scnprintf(buf + len, buf_len - len, "\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_pdev_mbssid_ctrl_frame_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_pdev_mbssid_ctrl_frame_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, "HTT_MBSSID_CTRL_FRAME_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "basic_trigger_across_bss = %u\n", + le32_to_cpu(htt_stats_buf->basic_trigger_across_bss)); + len += scnprintf(buf + len, buf_len - len, "basic_trigger_within_bss = %u\n", + le32_to_cpu(htt_stats_buf->basic_trigger_within_bss)); + len += scnprintf(buf + len, buf_len - 
len, "bsr_trigger_across_bss = %u\n", + le32_to_cpu(htt_stats_buf->bsr_trigger_across_bss)); + len += scnprintf(buf + len, buf_len - len, "bsr_trigger_within_bss = %u\n", + le32_to_cpu(htt_stats_buf->bsr_trigger_within_bss)); + len += scnprintf(buf + len, buf_len - len, "mu_rts_across_bss = %u\n", + le32_to_cpu(htt_stats_buf->mu_rts_across_bss)); + len += scnprintf(buf + len, buf_len - len, "mu_rts_within_bss = %u\n", + le32_to_cpu(htt_stats_buf->mu_rts_within_bss)); + len += scnprintf(buf + len, buf_len - len, "ul_mumimo_trigger_across_bss = %u\n", + le32_to_cpu(htt_stats_buf->ul_mumimo_trigger_across_bss)); + len += scnprintf(buf + len, buf_len - len, + "ul_mumimo_trigger_within_bss = %u\n\n", + le32_to_cpu(htt_stats_buf->ul_mumimo_trigger_within_bss)); + + stats_req->buf_len = len; +} + +static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab, + u16 tag, u16 len, const void *tag_buf, + void *user_data) +{ + struct debug_htt_stats_req *stats_req = user_data; + + switch (tag) { + case HTT_STATS_TX_PDEV_CMN_TAG: + htt_print_tx_pdev_stats_cmn_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_PDEV_UNDERRUN_TAG: + htt_print_tx_pdev_stats_urrn_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_PDEV_SIFS_TAG: + htt_print_tx_pdev_stats_sifs_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_PDEV_FLUSH_TAG: + htt_print_tx_pdev_stats_flush_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_PDEV_SIFS_HIST_TAG: + htt_print_tx_pdev_stats_sifs_hist_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG: + htt_print_pdev_ctrl_path_tx_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_MU_PPDU_DIST_TAG: + htt_print_tx_pdev_mu_ppdu_dist_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_SCHED_CMN_TAG: + ath12k_htt_print_stats_tx_sched_cmn_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG: + ath12k_htt_print_tx_pdev_stats_sched_per_txq_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG: + ath12k_htt_print_sched_txq_cmd_posted_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG: + ath12k_htt_print_sched_txq_cmd_reaped_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG: + ath12k_htt_print_sched_txq_sched_order_su_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG: + ath12k_htt_print_sched_txq_sched_ineligibility_tlv(tag_buf, len, + stats_req); + break; + case HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG: + ath12k_htt_print_sched_txq_supercycle_trigger_tlv(tag_buf, len, + stats_req); + break; + case HTT_STATS_HW_PDEV_ERRS_TAG: + ath12k_htt_print_hw_stats_pdev_errs_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_HW_INTR_MISC_TAG: + ath12k_htt_print_hw_stats_intr_misc_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_WHAL_TX_TAG: + ath12k_htt_print_hw_stats_whal_tx_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_HW_WAR_TAG: + ath12k_htt_print_hw_war_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_TQM_CMN_TAG: + ath12k_htt_print_tx_tqm_cmn_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_TQM_ERROR_STATS_TAG: + ath12k_htt_print_tx_tqm_error_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_TQM_GEN_MPDU_TAG: + ath12k_htt_print_tx_tqm_gen_mpdu_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_TQM_LIST_MPDU_TAG: + ath12k_htt_print_tx_tqm_list_mpdu_stats_tlv(tag_buf, len, 
stats_req); + break; + case HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG: + ath12k_htt_print_tx_tqm_list_mpdu_cnt_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_TQM_PDEV_TAG: + ath12k_htt_print_tx_tqm_pdev_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_DE_CMN_TAG: + ath12k_htt_print_tx_de_cmn_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_DE_EAPOL_PACKETS_TAG: + ath12k_htt_print_tx_de_eapol_packets_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_DE_CLASSIFY_STATS_TAG: + ath12k_htt_print_tx_de_classify_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG: + ath12k_htt_print_tx_de_classify_failed_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG: + ath12k_htt_print_tx_de_classify_status_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG: + ath12k_htt_print_tx_de_enqueue_packets_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG: + ath12k_htt_print_tx_de_enqueue_discard_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_DE_COMPL_STATS_TAG: + ath12k_htt_print_tx_de_compl_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_SELFGEN_CMN_STATS_TAG: + ath12k_htt_print_tx_selfgen_cmn_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_SELFGEN_AC_STATS_TAG: + ath12k_htt_print_tx_selfgen_ac_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_SELFGEN_AX_STATS_TAG: + ath12k_htt_print_tx_selfgen_ax_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_SELFGEN_BE_STATS_TAG: + ath12k_htt_print_tx_selfgen_be_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_SELFGEN_AC_ERR_STATS_TAG: + ath12k_htt_print_tx_selfgen_ac_err_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_SELFGEN_AX_ERR_STATS_TAG: + ath12k_htt_print_tx_selfgen_ax_err_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_SELFGEN_BE_ERR_STATS_TAG: + ath12k_htt_print_tx_selfgen_be_err_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_SELFGEN_AC_SCHED_STATUS_STATS_TAG: + ath12k_htt_print_tx_selfgen_ac_sched_status_stats_tlv(tag_buf, len, + stats_req); + break; + case HTT_STATS_TX_SELFGEN_AX_SCHED_STATUS_STATS_TAG: + ath12k_htt_print_tx_selfgen_ax_sched_status_stats_tlv(tag_buf, len, + stats_req); + break; + case HTT_STATS_TX_SELFGEN_BE_SCHED_STATUS_STATS_TAG: + ath12k_htt_print_tx_selfgen_be_sched_status_stats_tlv(tag_buf, len, + stats_req); + break; + case HTT_STATS_STRING_TAG: + ath12k_htt_print_stats_string_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_SRING_STATS_TAG: + ath12k_htt_print_sring_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_SFM_CMN_TAG: + ath12k_htt_print_sfm_cmn_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_SFM_CLIENT_TAG: + ath12k_htt_print_sfm_client_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_SFM_CLIENT_USER_TAG: + ath12k_htt_print_sfm_client_user_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG: + ath12k_htt_print_tx_pdev_mu_mimo_sch_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_PDEV_MUMIMO_GRP_STATS_TAG: + ath12k_htt_print_tx_pdev_mumimo_grp_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_PDEV_MPDU_STATS_TAG: + ath12k_htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_PDEV_CCA_1SEC_HIST_TAG: + case HTT_STATS_PDEV_CCA_100MSEC_HIST_TAG: + case 
HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG: + ath12k_htt_print_pdev_cca_stats_hist_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_PDEV_CCA_COUNTERS_TAG: + ath12k_htt_print_pdev_stats_cca_counters_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_PDEV_OBSS_PD_TAG: + ath12k_htt_print_pdev_obss_pd_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG: + ath12k_htt_print_pdev_tx_rate_txbf_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TXBF_OFDMA_AX_NDPA_STATS_TAG: + ath12k_htt_print_txbf_ofdma_ax_ndpa_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TXBF_OFDMA_AX_NDP_STATS_TAG: + ath12k_htt_print_txbf_ofdma_ax_ndp_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TXBF_OFDMA_AX_BRP_STATS_TAG: + ath12k_htt_print_txbf_ofdma_ax_brp_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TXBF_OFDMA_AX_STEER_STATS_TAG: + ath12k_htt_print_txbf_ofdma_ax_steer_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TXBF_OFDMA_AX_STEER_MPDU_STATS_TAG: + ath12k_htt_print_txbf_ofdma_ax_steer_mpdu_stats_tlv(tag_buf, len, + stats_req); + break; + case HTT_STATS_DLPAGER_STATS_TAG: + ath12k_htt_print_dlpager_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_PHY_STATS_TAG: + ath12k_htt_print_phy_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_PHY_COUNTERS_TAG: + ath12k_htt_print_phy_counters_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_PHY_RESET_STATS_TAG: + ath12k_htt_print_phy_reset_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_PHY_RESET_COUNTERS_TAG: + ath12k_htt_print_phy_reset_counters_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_PHY_TPC_STATS_TAG: + ath12k_htt_print_phy_tpc_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_SOC_TXRX_STATS_COMMON_TAG: + ath12k_htt_print_soc_txrx_stats_common_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_PER_RATE_STATS_TAG: + ath12k_htt_print_tx_per_rate_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_AST_ENTRY_TAG: + ath12k_htt_print_ast_entry_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_PDEV_PUNCTURE_STATS_TAG: + ath12k_htt_print_puncture_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_DMAC_RESET_STATS_TAG: + ath12k_htt_print_dmac_reset_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_PDEV_SCHED_ALGO_OFDMA_STATS_TAG: + ath12k_htt_print_pdev_sched_algo_ofdma_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_PDEV_RATE_STATS_BE_OFDMA_TAG: + ath12k_htt_print_tx_pdev_rate_stats_be_ofdma_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_PDEV_MBSSID_CTRL_FRAME_STATS_TAG: + ath12k_htt_print_pdev_mbssid_ctrl_frame_stats_tlv(tag_buf, len, + stats_req); + break; + default: + break; + } + + return 0; +} + +void ath12k_debugfs_htt_ext_stats_handler(struct ath12k_base *ab, + struct sk_buff *skb) +{ + struct ath12k_htt_extd_stats_msg *msg; + struct debug_htt_stats_req *stats_req; + struct ath12k *ar; + u32 len, pdev_id, stats_info; + u64 cookie; + int ret; + bool send_completion = false; + + msg = (struct ath12k_htt_extd_stats_msg *)skb->data; + cookie = le64_to_cpu(msg->cookie); + + if (u64_get_bits(cookie, ATH12K_HTT_STATS_COOKIE_MSB) != + ATH12K_HTT_STATS_MAGIC_VALUE) { + ath12k_warn(ab, "received invalid htt ext stats event\n"); + return; + } + + pdev_id = u64_get_bits(cookie, ATH12K_HTT_STATS_COOKIE_LSB); + rcu_read_lock(); + ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id); + if (!ar) { + ath12k_warn(ab, "failed to get ar for 
pdev_id %d\n", pdev_id); + goto exit; + } + + stats_req = ar->debug.htt_stats.stats_req; + if (!stats_req) + goto exit; + + spin_lock_bh(&ar->data_lock); + + stats_info = le32_to_cpu(msg->info1); + stats_req->done = u32_get_bits(stats_info, ATH12K_HTT_T2H_EXT_STATS_INFO1_DONE); + if (stats_req->done) + send_completion = true; + + spin_unlock_bh(&ar->data_lock); + + len = u32_get_bits(stats_info, ATH12K_HTT_T2H_EXT_STATS_INFO1_LENGTH); + if (len > skb->len) { + ath12k_warn(ab, "invalid length %d for HTT stats", len); + goto exit; + } + + ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len, + ath12k_dbg_htt_ext_stats_parse, + stats_req); + if (ret) + ath12k_warn(ab, "Failed to parse tlv %d\n", ret); + + if (send_completion) + complete(&stats_req->htt_stats_rcvd); +exit: + rcu_read_unlock(); +} + +static ssize_t ath12k_read_htt_stats_type(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath12k *ar = file->private_data; + enum ath12k_dbg_htt_ext_stats_type type; + char buf[32]; + size_t len; + + wiphy_lock(ath12k_ar_to_hw(ar)->wiphy); + type = ar->debug.htt_stats.type; + wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); + + len = scnprintf(buf, sizeof(buf), "%u\n", type); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath12k_write_htt_stats_type(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath12k *ar = file->private_data; + enum ath12k_dbg_htt_ext_stats_type type; + unsigned int cfg_param[4] = {0}; + const int size = 32; + int num_args; + + char *buf __free(kfree) = kzalloc(size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + num_args = sscanf(buf, "%u %u %u %u %u\n", &type, &cfg_param[0], + &cfg_param[1], &cfg_param[2], &cfg_param[3]); + if (!num_args || num_args > 5) + return -EINVAL; + + if (type == ATH12K_DBG_HTT_EXT_STATS_RESET || + type >= ATH12K_DBG_HTT_NUM_EXT_STATS) + return -EINVAL; + + wiphy_lock(ath12k_ar_to_hw(ar)->wiphy); + + ar->debug.htt_stats.type = type; + ar->debug.htt_stats.cfg_param[0] = cfg_param[0]; + ar->debug.htt_stats.cfg_param[1] = cfg_param[1]; + ar->debug.htt_stats.cfg_param[2] = cfg_param[2]; + ar->debug.htt_stats.cfg_param[3] = cfg_param[3]; + + wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); + + return count; +} + +static const struct file_operations fops_htt_stats_type = { + .read = ath12k_read_htt_stats_type, + .write = ath12k_write_htt_stats_type, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static int ath12k_debugfs_htt_stats_req(struct ath12k *ar) +{ + struct debug_htt_stats_req *stats_req = ar->debug.htt_stats.stats_req; + enum ath12k_dbg_htt_ext_stats_type type = stats_req->type; + u64 cookie; + int ret, pdev_id; + struct htt_ext_stats_cfg_params cfg_params = { 0 }; + + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + init_completion(&stats_req->htt_stats_rcvd); + + pdev_id = ath12k_mac_get_target_pdev_id(ar); + stats_req->done = false; + stats_req->pdev_id = pdev_id; + + cookie = u64_encode_bits(ATH12K_HTT_STATS_MAGIC_VALUE, + ATH12K_HTT_STATS_COOKIE_MSB); + cookie |= u64_encode_bits(pdev_id, ATH12K_HTT_STATS_COOKIE_LSB); + + if (stats_req->override_cfg_param) { + cfg_params.cfg0 = stats_req->cfg_param[0]; + cfg_params.cfg1 = stats_req->cfg_param[1]; + cfg_params.cfg2 = stats_req->cfg_param[2]; + cfg_params.cfg3 = stats_req->cfg_param[3]; + } + + ret = ath12k_dp_tx_htt_h2t_ext_stats_req(ar, type, &cfg_params, cookie); + if (ret) { + 
ath12k_warn(ar->ab, "failed to send htt stats request: %d\n", ret); + return ret; + } + if (!wait_for_completion_timeout(&stats_req->htt_stats_rcvd, 3 * HZ)) { + spin_lock_bh(&ar->data_lock); + if (!stats_req->done) { + stats_req->done = true; + spin_unlock_bh(&ar->data_lock); + ath12k_warn(ar->ab, "stats request timed out\n"); + return -ETIMEDOUT; + } + spin_unlock_bh(&ar->data_lock); + } + + return 0; +} + +static int ath12k_open_htt_stats(struct inode *inode, + struct file *file) +{ + struct ath12k *ar = inode->i_private; + struct debug_htt_stats_req *stats_req; + enum ath12k_dbg_htt_ext_stats_type type = ar->debug.htt_stats.type; + struct ath12k_hw *ah = ath12k_ar_to_ah(ar); + int ret; + + if (type == ATH12K_DBG_HTT_EXT_STATS_RESET) + return -EPERM; + + wiphy_lock(ath12k_ar_to_hw(ar)->wiphy); + + if (ah->state != ATH12K_HW_STATE_ON) { + ret = -ENETDOWN; + goto err_unlock; + } + + if (ar->debug.htt_stats.stats_req) { + ret = -EAGAIN; + goto err_unlock; + } + + stats_req = kzalloc(sizeof(*stats_req) + ATH12K_HTT_STATS_BUF_SIZE, GFP_KERNEL); + if (!stats_req) { + ret = -ENOMEM; + goto err_unlock; + } + + ar->debug.htt_stats.stats_req = stats_req; + stats_req->type = type; + stats_req->cfg_param[0] = ar->debug.htt_stats.cfg_param[0]; + stats_req->cfg_param[1] = ar->debug.htt_stats.cfg_param[1]; + stats_req->cfg_param[2] = ar->debug.htt_stats.cfg_param[2]; + stats_req->cfg_param[3] = ar->debug.htt_stats.cfg_param[3]; + stats_req->override_cfg_param = !!stats_req->cfg_param[0] || + !!stats_req->cfg_param[1] || + !!stats_req->cfg_param[2] || + !!stats_req->cfg_param[3]; + + ret = ath12k_debugfs_htt_stats_req(ar); + if (ret < 0) + goto out; + + file->private_data = stats_req; + + wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); + + return 0; +out: + kfree(stats_req); + ar->debug.htt_stats.stats_req = NULL; +err_unlock: + wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); + + return ret; +} + +static int ath12k_release_htt_stats(struct inode *inode, + struct file *file) +{ + struct ath12k *ar = inode->i_private; + + wiphy_lock(ath12k_ar_to_hw(ar)->wiphy); + kfree(file->private_data); + ar->debug.htt_stats.stats_req = NULL; + wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); + + return 0; +} + +static ssize_t ath12k_read_htt_stats(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct debug_htt_stats_req *stats_req = file->private_data; + char *buf; + u32 length; + + buf = stats_req->buf; + length = min_t(u32, stats_req->buf_len, ATH12K_HTT_STATS_BUF_SIZE); + return simple_read_from_buffer(user_buf, count, ppos, buf, length); +} + +static const struct file_operations fops_dump_htt_stats = { + .open = ath12k_open_htt_stats, + .release = ath12k_release_htt_stats, + .read = ath12k_read_htt_stats, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath12k_write_htt_stats_reset(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath12k *ar = file->private_data; + enum ath12k_dbg_htt_ext_stats_type type; + struct htt_ext_stats_cfg_params cfg_params = { 0 }; + u8 param_pos; + int ret; + + ret = kstrtou32_from_user(user_buf, count, 0, &type); + if (ret) + return ret; + + if (type >= ATH12K_DBG_HTT_NUM_EXT_STATS || + type == ATH12K_DBG_HTT_EXT_STATS_RESET) + return -E2BIG; + + wiphy_lock(ath12k_ar_to_hw(ar)->wiphy); + cfg_params.cfg0 = HTT_STAT_DEFAULT_RESET_START_OFFSET; + param_pos = (type >> 5) + 1; + + switch (param_pos) { + case ATH12K_HTT_STATS_RESET_PARAM_CFG_32_BYTES: + cfg_params.cfg1 = 1 << (cfg_params.cfg0 + type); + break; + 
case ATH12K_HTT_STATS_RESET_PARAM_CFG_64_BYTES: + cfg_params.cfg2 = ATH12K_HTT_STATS_RESET_BITMAP32_BIT(cfg_params.cfg0 + + type); + break; + case ATH12K_HTT_STATS_RESET_PARAM_CFG_128_BYTES: + cfg_params.cfg3 = ATH12K_HTT_STATS_RESET_BITMAP64_BIT(cfg_params.cfg0 + + type); + break; + default: + break; + } + + ret = ath12k_dp_tx_htt_h2t_ext_stats_req(ar, + ATH12K_DBG_HTT_EXT_STATS_RESET, + &cfg_params, + 0ULL); + if (ret) { + ath12k_warn(ar->ab, "failed to send htt stats request: %d\n", ret); + wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); + return ret; + } + + ar->debug.htt_stats.reset = type; + wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); + + return count; +} + +static const struct file_operations fops_htt_stats_reset = { + .write = ath12k_write_htt_stats_reset, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +void ath12k_debugfs_htt_stats_register(struct ath12k *ar) +{ + debugfs_create_file("htt_stats_type", 0600, ar->debug.debugfs_pdev, + ar, &fops_htt_stats_type); + debugfs_create_file("htt_stats", 0400, ar->debug.debugfs_pdev, + ar, &fops_dump_htt_stats); + debugfs_create_file("htt_stats_reset", 0200, ar->debug.debugfs_pdev, + ar, &fops_htt_stats_reset); +} diff --git a/debugfs_htt_stats.h b/debugfs_htt_stats.h new file mode 100644 index 000000000000..4b59976fbc35 --- /dev/null +++ b/debugfs_htt_stats.h @@ -0,0 +1,1494 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef DEBUG_HTT_STATS_H +#define DEBUG_HTT_STATS_H + +#define ATH12K_HTT_STATS_BUF_SIZE (1024 * 512) +#define ATH12K_HTT_STATS_COOKIE_LSB GENMASK_ULL(31, 0) +#define ATH12K_HTT_STATS_COOKIE_MSB GENMASK_ULL(63, 32) +#define ATH12K_HTT_STATS_MAGIC_VALUE 0xF0F0F0F0 +#define ATH12K_HTT_STATS_SUBTYPE_MAX 16 +#define ATH12K_HTT_MAX_STRING_LEN 256 + +#define ATH12K_HTT_STATS_RESET_BITMAP32_OFFSET(_idx) ((_idx) & 0x1f) +#define ATH12K_HTT_STATS_RESET_BITMAP64_OFFSET(_idx) ((_idx) & 0x3f) +#define ATH12K_HTT_STATS_RESET_BITMAP32_BIT(_idx) (1 << \ + ATH12K_HTT_STATS_RESET_BITMAP32_OFFSET(_idx)) +#define ATH12K_HTT_STATS_RESET_BITMAP64_BIT(_idx) (1 << \ + ATH12K_HTT_STATS_RESET_BITMAP64_OFFSET(_idx)) + +void ath12k_debugfs_htt_stats_register(struct ath12k *ar); + +#ifdef CONFIG_ATH12K_DEBUGFS +void ath12k_debugfs_htt_ext_stats_handler(struct ath12k_base *ab, + struct sk_buff *skb); +#else /* CONFIG_ATH12K_DEBUGFS */ +static inline void ath12k_debugfs_htt_ext_stats_handler(struct ath12k_base *ab, + struct sk_buff *skb) +{ +} +#endif + +/** + * DOC: target -> host extended statistics upload + * + * The following field definitions describe the format of the HTT + * target to host stats upload confirmation message. + * The message contains a cookie echoed from the HTT host->target stats + * upload request, which identifies which request the confirmation is + * for. A single stats request can span multiple HTT stats indications + * due to the HTT message size limitation, so every HTT ext stats + * indication carries its stats as tag-length-value information elements. + * The tag-length header for each HTT stats IND message also includes a + * status field, to indicate whether the request for the stat type in + * question was fully met, partially met, unable to be met, or invalid + * (if the stat type in question is disabled in the target). + * A Done bit set to 1 indicates the end of the stats info elements.
+ * + * + * |31 16|15 12|11|10 8|7 5|4 0| + * |--------------------------------------------------------------| + * | reserved | msg type | + * |--------------------------------------------------------------| + * | cookie LSBs | + * |--------------------------------------------------------------| + * | cookie MSBs | + * |--------------------------------------------------------------| + * | stats entry length | rsvd | D| S | stat type | + * |--------------------------------------------------------------| + * | type-specific stats info | + * | (see debugfs_htt_stats.h) | + * |--------------------------------------------------------------| + * Header fields: + * - MSG_TYPE + * Bits 7:0 + * Purpose: Identifies this is an extended statistics upload confirmation + * message. + * Value: 0x1c + * - COOKIE_LSBS + * Bits 31:0 + * Purpose: Provide a mechanism to match a target->host stats confirmation + * message with its preceding host->target stats request message. + * Value: LSBs of the opaque cookie specified by the host-side requestor + * - COOKIE_MSBS + * Bits 31:0 + * Purpose: Provide a mechanism to match a target->host stats confirmation + * message with its preceding host->target stats request message. + * Value: MSBs of the opaque cookie specified by the host-side requestor + * + * Stats Information Element tag-length header fields: + * - STAT_TYPE + * Bits 7:0 + * Purpose: identifies the type of statistics info held in the + * following information element + * Value: ath12k_dbg_htt_ext_stats_type + * - STATUS + * Bits 10:8 + * Purpose: indicate whether the requested stats are present + * Value: + * 0 -> The requested stats have been delivered in full + * 1 -> The requested stats have been delivered in part + * 2 -> The requested stats could not be delivered (error case) + * 3 -> The requested stat type is not recognized (invalid) + * - DONE + * Bit 11 + * Purpose: + * Indicates the completion of the stats entry; this will be the last + * stats conf HTT segment for the requested stats type. + * Value: + * 0 -> the stats retrieval is ongoing + * 1 -> the stats retrieval is complete + * - LENGTH + * Bits 31:16 + * Purpose: indicate the stats information size + * Value: This field specifies the number of bytes of stats information + * that follows the element tag-length header. + * It is expected but not required that this length is a multiple of + * 4 bytes.
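+ * + * For illustration, the host side pairs a request with this confirmation + * through the cookie, using the bitfield helpers seen in + * debugfs_htt_stats.c: the request encodes + * + * cookie = u64_encode_bits(ATH12K_HTT_STATS_MAGIC_VALUE, + * ATH12K_HTT_STATS_COOKIE_MSB); + * cookie |= u64_encode_bits(pdev_id, ATH12K_HTT_STATS_COOKIE_LSB); + * + * and the event handler accepts the confirmation only when + * u64_get_bits(cookie, ATH12K_HTT_STATS_COOKIE_MSB) matches + * ATH12K_HTT_STATS_MAGIC_VALUE, then recovers the pdev_id from the + * cookie LSBs.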
+ */ + +#define ATH12K_HTT_T2H_EXT_STATS_INFO1_DONE BIT(11) +#define ATH12K_HTT_T2H_EXT_STATS_INFO1_LENGTH GENMASK(31, 16) + +struct ath12k_htt_extd_stats_msg { + __le32 info0; + __le64 cookie; + __le32 info1; + u8 data[]; +} __packed; + +/* htt_dbg_ext_stats_type */ +enum ath12k_dbg_htt_ext_stats_type { + ATH12K_DBG_HTT_EXT_STATS_RESET = 0, + ATH12K_DBG_HTT_EXT_STATS_PDEV_TX = 1, + ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4, + ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5, + ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM = 6, + ATH12K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8, + ATH12K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12, + ATH12K_DBG_HTT_EXT_STATS_SRNG_INFO = 15, + ATH12K_DBG_HTT_EXT_STATS_SFM_INFO = 16, + ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17, + ATH12K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19, + ATH12K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23, + ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_RATE_TXBF = 31, + ATH12K_DBG_HTT_EXT_STATS_TXBF_OFDMA = 32, + ATH12K_DBG_HTT_EXT_STATS_DLPAGER_STATS = 36, + ATH12K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS = 37, + ATH12K_DBG_HTT_EXT_VDEVS_TXRX_STATS = 38, + ATH12K_DBG_HTT_EXT_PDEV_PER_STATS = 40, + ATH12K_DBG_HTT_EXT_AST_ENTRIES = 41, + ATH12K_DBG_HTT_EXT_STATS_SOC_ERROR = 45, + ATH12K_DBG_HTT_DBG_PDEV_PUNCTURE_STATS = 46, + ATH12K_DBG_HTT_EXT_STATS_PDEV_SCHED_ALGO = 49, + ATH12K_DBG_HTT_EXT_STATS_MANDATORY_MUOFDMA = 51, + ATH12K_DBG_HTT_EXT_STATS_PDEV_MBSSID_CTRL_FRAME = 54, + + /* keep this last */ + ATH12K_DBG_HTT_NUM_EXT_STATS, +}; + +enum ath12k_dbg_htt_tlv_tag { + HTT_STATS_TX_PDEV_CMN_TAG = 0, + HTT_STATS_TX_PDEV_UNDERRUN_TAG = 1, + HTT_STATS_TX_PDEV_SIFS_TAG = 2, + HTT_STATS_TX_PDEV_FLUSH_TAG = 3, + HTT_STATS_STRING_TAG = 5, + HTT_STATS_TX_TQM_GEN_MPDU_TAG = 11, + HTT_STATS_TX_TQM_LIST_MPDU_TAG = 12, + HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG = 13, + HTT_STATS_TX_TQM_CMN_TAG = 14, + HTT_STATS_TX_TQM_PDEV_TAG = 15, + HTT_STATS_TX_DE_EAPOL_PACKETS_TAG = 17, + HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG = 18, + HTT_STATS_TX_DE_CLASSIFY_STATS_TAG = 19, + HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG = 20, + HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG = 21, + HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG = 22, + HTT_STATS_TX_DE_CMN_TAG = 23, + HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG = 25, + HTT_STATS_SFM_CMN_TAG = 26, + HTT_STATS_SRING_STATS_TAG = 27, + HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG = 36, + HTT_STATS_TX_SCHED_CMN_TAG = 37, + HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG = 39, + HTT_STATS_SFM_CLIENT_USER_TAG = 41, + HTT_STATS_SFM_CLIENT_TAG = 42, + HTT_STATS_TX_TQM_ERROR_STATS_TAG = 43, + HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG = 44, + HTT_STATS_TX_SELFGEN_AC_ERR_STATS_TAG = 46, + HTT_STATS_TX_SELFGEN_CMN_STATS_TAG = 47, + HTT_STATS_TX_SELFGEN_AC_STATS_TAG = 48, + HTT_STATS_TX_SELFGEN_AX_STATS_TAG = 49, + HTT_STATS_TX_SELFGEN_AX_ERR_STATS_TAG = 50, + HTT_STATS_HW_INTR_MISC_TAG = 54, + HTT_STATS_HW_PDEV_ERRS_TAG = 56, + HTT_STATS_TX_DE_COMPL_STATS_TAG = 65, + HTT_STATS_WHAL_TX_TAG = 66, + HTT_STATS_TX_PDEV_SIFS_HIST_TAG = 67, + HTT_STATS_PDEV_CCA_1SEC_HIST_TAG = 70, + HTT_STATS_PDEV_CCA_100MSEC_HIST_TAG = 71, + HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG = 72, + HTT_STATS_PDEV_CCA_COUNTERS_TAG = 73, + HTT_STATS_TX_PDEV_MPDU_STATS_TAG = 74, + HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG = 86, + HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG = 87, + HTT_STATS_PDEV_OBSS_PD_TAG = 88, + HTT_STATS_HW_WAR_TAG = 89, + HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG = 100, + HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG = 102, + HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG = 108, + HTT_STATS_TX_SELFGEN_AC_SCHED_STATUS_STATS_TAG = 111, +
HTT_STATS_TX_SELFGEN_AX_SCHED_STATUS_STATS_TAG = 112, + HTT_STATS_DLPAGER_STATS_TAG = 120, + HTT_STATS_PHY_COUNTERS_TAG = 121, + HTT_STATS_PHY_STATS_TAG = 122, + HTT_STATS_PHY_RESET_COUNTERS_TAG = 123, + HTT_STATS_PHY_RESET_STATS_TAG = 124, + HTT_STATS_SOC_TXRX_STATS_COMMON_TAG = 125, + HTT_STATS_PER_RATE_STATS_TAG = 128, + HTT_STATS_MU_PPDU_DIST_TAG = 129, + HTT_STATS_TX_PDEV_MUMIMO_GRP_STATS_TAG = 130, + HTT_STATS_AST_ENTRY_TAG = 132, + HTT_STATS_TX_PDEV_RATE_STATS_BE_OFDMA_TAG = 135, + HTT_STATS_TX_SELFGEN_BE_ERR_STATS_TAG = 137, + HTT_STATS_TX_SELFGEN_BE_STATS_TAG = 138, + HTT_STATS_TX_SELFGEN_BE_SCHED_STATUS_STATS_TAG = 139, + HTT_STATS_TXBF_OFDMA_AX_NDPA_STATS_TAG = 147, + HTT_STATS_TXBF_OFDMA_AX_NDP_STATS_TAG = 148, + HTT_STATS_TXBF_OFDMA_AX_BRP_STATS_TAG = 149, + HTT_STATS_TXBF_OFDMA_AX_STEER_STATS_TAG = 150, + HTT_STATS_DMAC_RESET_STATS_TAG = 155, + HTT_STATS_PHY_TPC_STATS_TAG = 157, + HTT_STATS_PDEV_PUNCTURE_STATS_TAG = 158, + HTT_STATS_PDEV_SCHED_ALGO_OFDMA_STATS_TAG = 165, + HTT_STATS_TXBF_OFDMA_AX_STEER_MPDU_STATS_TAG = 172, + HTT_STATS_PDEV_MBSSID_CTRL_FRAME_STATS_TAG = 176, + + HTT_STATS_MAX_TAG, +}; + +#define ATH12K_HTT_STATS_MAC_ID GENMASK(7, 0) + +#define ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_STATS 9 +#define ATH12K_HTT_TX_PDEV_MAX_FLUSH_REASON_STATS 150 + +/* MU MIMO distribution stats is a 2-dimensional array + * with dimension one denoting stats for nr4[0] or nr8[1] + */ +#define ATH12K_HTT_STATS_NUM_NR_BINS 2 +#define ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST 10 +#define ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS 10 +#define ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS 9 +#define ATH12K_HTT_STATS_NUM_SCHED_STATUS_WORDS \ + (ATH12K_HTT_STATS_NUM_NR_BINS * ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS) +#define ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS \ + (ATH12K_HTT_STATS_NUM_NR_BINS * ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST) + +enum ath12k_htt_tx_pdev_underrun_enum { + HTT_STATS_TX_PDEV_NO_DATA_UNDERRUN = 0, + HTT_STATS_TX_PDEV_DATA_UNDERRUN_BETWEEN_MPDU = 1, + HTT_STATS_TX_PDEV_DATA_UNDERRUN_WITHIN_MPDU = 2, + HTT_TX_PDEV_MAX_URRN_STATS = 3, +}; + +enum ath12k_htt_stats_reset_cfg_param_alloc_pos { + ATH12K_HTT_STATS_RESET_PARAM_CFG_32_BYTES = 1, + ATH12K_HTT_STATS_RESET_PARAM_CFG_64_BYTES, + ATH12K_HTT_STATS_RESET_PARAM_CFG_128_BYTES, +}; + +struct debug_htt_stats_req { + bool done; + bool override_cfg_param; + u8 pdev_id; + enum ath12k_dbg_htt_ext_stats_type type; + u32 cfg_param[4]; + u8 peer_addr[ETH_ALEN]; + struct completion htt_stats_rcvd; + u32 buf_len; + u8 buf[]; +}; + +struct ath12k_htt_tx_pdev_stats_cmn_tlv { + __le32 mac_id__word; + __le32 hw_queued; + __le32 hw_reaped; + __le32 underrun; + __le32 hw_paused; + __le32 hw_flush; + __le32 hw_filt; + __le32 tx_abort; + __le32 mpdu_requed; + __le32 tx_xretry; + __le32 data_rc; + __le32 mpdu_dropped_xretry; + __le32 illgl_rate_phy_err; + __le32 cont_xretry; + __le32 tx_timeout; + __le32 pdev_resets; + __le32 phy_underrun; + __le32 txop_ovf; + __le32 seq_posted; + __le32 seq_failed_queueing; + __le32 seq_completed; + __le32 seq_restarted; + __le32 mu_seq_posted; + __le32 seq_switch_hw_paused; + __le32 next_seq_posted_dsr; + __le32 seq_posted_isr; + __le32 seq_ctrl_cached; + __le32 mpdu_count_tqm; + __le32 msdu_count_tqm; + __le32 mpdu_removed_tqm; + __le32 msdu_removed_tqm; + __le32 mpdus_sw_flush; + __le32 mpdus_hw_filter; + __le32 mpdus_truncated; + __le32 mpdus_ack_failed; + __le32 mpdus_expired; + __le32 mpdus_seq_hw_retry; + __le32 ack_tlv_proc; + __le32 coex_abort_mpdu_cnt_valid; + __le32 coex_abort_mpdu_cnt; + __le32 
num_total_ppdus_tried_ota; + __le32 num_data_ppdus_tried_ota; + __le32 local_ctrl_mgmt_enqued; + __le32 local_ctrl_mgmt_freed; + __le32 local_data_enqued; + __le32 local_data_freed; + __le32 mpdu_tried; + __le32 isr_wait_seq_posted; + + __le32 tx_active_dur_us_low; + __le32 tx_active_dur_us_high; + __le32 remove_mpdus_max_retries; + __le32 comp_delivered; + __le32 ppdu_ok; + __le32 self_triggers; + __le32 tx_time_dur_data; + __le32 seq_qdepth_repost_stop; + __le32 mu_seq_min_msdu_repost_stop; + __le32 seq_min_msdu_repost_stop; + __le32 seq_txop_repost_stop; + __le32 next_seq_cancel; + __le32 fes_offsets_err_cnt; + __le32 num_mu_peer_blacklisted; + __le32 mu_ofdma_seq_posted; + __le32 ul_mumimo_seq_posted; + __le32 ul_ofdma_seq_posted; + + __le32 thermal_suspend_cnt; + __le32 dfs_suspend_cnt; + __le32 tx_abort_suspend_cnt; + __le32 tgt_specific_opaque_txq_suspend_info; + __le32 last_suspend_reason; +} __packed; + +struct ath12k_htt_tx_pdev_stats_urrn_tlv { + DECLARE_FLEX_ARRAY(__le32, urrn_stats); +} __packed; + +struct ath12k_htt_tx_pdev_stats_flush_tlv { + DECLARE_FLEX_ARRAY(__le32, flush_errs); +} __packed; + +struct ath12k_htt_tx_pdev_stats_phy_err_tlv { + DECLARE_FLEX_ARRAY(__le32, phy_errs); +} __packed; + +struct ath12k_htt_tx_pdev_stats_sifs_tlv { + DECLARE_FLEX_ARRAY(__le32, sifs_status); +} __packed; + +struct ath12k_htt_pdev_ctrl_path_tx_stats_tlv { + __le32 fw_tx_mgmt_subtype[ATH12K_HTT_STATS_SUBTYPE_MAX]; +} __packed; + +struct ath12k_htt_tx_pdev_stats_sifs_hist_tlv { + DECLARE_FLEX_ARRAY(__le32, sifs_hist_status); +} __packed; + +enum ath12k_htt_stats_hw_mode { + ATH12K_HTT_STATS_HWMODE_AC = 0, + ATH12K_HTT_STATS_HWMODE_AX = 1, + ATH12K_HTT_STATS_HWMODE_BE = 2, +}; + +struct ath12k_htt_tx_pdev_mu_ppdu_dist_stats_tlv { + __le32 hw_mode; + __le32 num_seq_term_status[ATH12K_HTT_STATS_NUM_SCHED_STATUS_WORDS]; + __le32 num_ppdu_cmpl_per_burst[ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS]; + __le32 num_seq_posted[ATH12K_HTT_STATS_NUM_NR_BINS]; + __le32 num_ppdu_posted_per_burst[ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS]; +} __packed; + +#define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID GENMASK(7, 0) +#define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID GENMASK(15, 8) + +#define ATH12K_HTT_TX_PDEV_NUM_SCHED_ORDER_LOG 20 + +struct ath12k_htt_stats_tx_sched_cmn_tlv { + __le32 mac_id__word; + __le32 current_timestamp; +} __packed; + +struct ath12k_htt_tx_pdev_stats_sched_per_txq_tlv { + __le32 mac_id__word; + __le32 sched_policy; + __le32 last_sched_cmd_posted_timestamp; + __le32 last_sched_cmd_compl_timestamp; + __le32 sched_2_tac_lwm_count; + __le32 sched_2_tac_ring_full; + __le32 sched_cmd_post_failure; + __le32 num_active_tids; + __le32 num_ps_schedules; + __le32 sched_cmds_pending; + __le32 num_tid_register; + __le32 num_tid_unregister; + __le32 num_qstats_queried; + __le32 qstats_update_pending; + __le32 last_qstats_query_timestamp; + __le32 num_tqm_cmdq_full; + __le32 num_de_sched_algo_trigger; + __le32 num_rt_sched_algo_trigger; + __le32 num_tqm_sched_algo_trigger; + __le32 notify_sched; + __le32 dur_based_sendn_term; + __le32 su_notify2_sched; + __le32 su_optimal_queued_msdus_sched; + __le32 su_delay_timeout_sched; + __le32 su_min_txtime_sched_delay; + __le32 su_no_delay; + __le32 num_supercycles; + __le32 num_subcycles_with_sort; + __le32 num_subcycles_no_sort; +} __packed; + +struct ath12k_htt_sched_txq_cmd_posted_tlv { + DECLARE_FLEX_ARRAY(__le32, sched_cmd_posted); +} __packed; + +struct ath12k_htt_sched_txq_cmd_reaped_tlv { + DECLARE_FLEX_ARRAY(__le32, sched_cmd_reaped); +} 
__packed; + +struct ath12k_htt_sched_txq_sched_order_su_tlv { + DECLARE_FLEX_ARRAY(__le32, sched_order_su); +} __packed; + +struct ath12k_htt_sched_txq_sched_ineligibility_tlv { + DECLARE_FLEX_ARRAY(__le32, sched_ineligibility); +} __packed; + +enum ath12k_htt_sched_txq_supercycle_triggers_tlv_enum { + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_NONE = 0, + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_FORCED, + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_LESS_NUM_TIDQ_ENTRIES, + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_LESS_NUM_ACTIVE_TIDS, + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX_ITR_REACHED, + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_DUR_THRESHOLD_REACHED, + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_TWT_TRIGGER, + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX, +}; + +struct ath12k_htt_sched_txq_supercycle_triggers_tlv { + DECLARE_FLEX_ARRAY(__le32, supercycle_triggers); +} __packed; + +struct ath12k_htt_hw_stats_pdev_errs_tlv { + __le32 mac_id__word; + __le32 tx_abort; + __le32 tx_abort_fail_count; + __le32 rx_abort; + __le32 rx_abort_fail_count; + __le32 warm_reset; + __le32 cold_reset; + __le32 tx_flush; + __le32 tx_glb_reset; + __le32 tx_txq_reset; + __le32 rx_timeout_reset; + __le32 mac_cold_reset_restore_cal; + __le32 mac_cold_reset; + __le32 mac_warm_reset; + __le32 mac_only_reset; + __le32 phy_warm_reset; + __le32 phy_warm_reset_ucode_trig; + __le32 mac_warm_reset_restore_cal; + __le32 mac_sfm_reset; + __le32 phy_warm_reset_m3_ssr; + __le32 phy_warm_reset_reason_phy_m3; + __le32 phy_warm_reset_reason_tx_hw_stuck; + __le32 phy_warm_reset_reason_num_rx_frame_stuck; + __le32 phy_warm_reset_reason_wal_rx_rec_rx_busy; + __le32 phy_warm_reset_reason_wal_rx_rec_mac_hng; + __le32 phy_warm_reset_reason_mac_conv_phy_reset; + __le32 wal_rx_recovery_rst_mac_hang_cnt; + __le32 wal_rx_recovery_rst_known_sig_cnt; + __le32 wal_rx_recovery_rst_no_rx_cnt; + __le32 wal_rx_recovery_rst_no_rx_consec_cnt; + __le32 wal_rx_recovery_rst_rx_busy_cnt; + __le32 wal_rx_recovery_rst_phy_mac_hang_cnt; + __le32 rx_flush_cnt; + __le32 phy_warm_reset_reason_tx_exp_cca_stuck; + __le32 phy_warm_reset_reason_tx_consec_flsh_war; + __le32 phy_warm_reset_reason_tx_hwsch_reset_war; + __le32 phy_warm_reset_reason_hwsch_cca_wdog_war; + __le32 fw_rx_rings_reset; + __le32 rx_dest_drain_rx_descs_leak_prevented; + __le32 rx_dest_drain_rx_descs_saved_cnt; + __le32 rx_dest_drain_rxdma2reo_leak_detected; + __le32 rx_dest_drain_rxdma2fw_leak_detected; + __le32 rx_dest_drain_rxdma2wbm_leak_detected; + __le32 rx_dest_drain_rxdma1_2sw_leak_detected; + __le32 rx_dest_drain_rx_drain_ok_mac_idle; + __le32 rx_dest_drain_ok_mac_not_idle; + __le32 rx_dest_drain_prerequisite_invld; + __le32 rx_dest_drain_skip_non_lmac_reset; + __le32 rx_dest_drain_hw_fifo_notempty_post_wait; +} __packed; + +#define ATH12K_HTT_STATS_MAX_HW_INTR_NAME_LEN 8 +struct ath12k_htt_hw_stats_intr_misc_tlv { + u8 hw_intr_name[ATH12K_HTT_STATS_MAX_HW_INTR_NAME_LEN]; + __le32 mask; + __le32 count; +} __packed; + +struct ath12k_htt_hw_stats_whal_tx_tlv { + __le32 mac_id__word; + __le32 last_unpause_ppdu_id; + __le32 hwsch_unpause_wait_tqm_write; + __le32 hwsch_dummy_tlv_skipped; + __le32 hwsch_misaligned_offset_received; + __le32 hwsch_reset_count; + __le32 hwsch_dev_reset_war; + __le32 hwsch_delayed_pause; + __le32 hwsch_long_delayed_pause; + __le32 sch_rx_ppdu_no_response; + __le32 sch_selfgen_response; + __le32 sch_rx_sifs_resp_trigger; +} __packed; + +struct ath12k_htt_hw_war_stats_tlv { + __le32 mac_id__word; + DECLARE_FLEX_ARRAY(__le32, hw_wars); +} __packed; + +struct ath12k_htt_tx_tqm_cmn_stats_tlv { + 
__le32 mac_id__word; + __le32 max_cmdq_id; + __le32 list_mpdu_cnt_hist_intvl; + __le32 add_msdu; + __le32 q_empty; + __le32 q_not_empty; + __le32 drop_notification; + __le32 desc_threshold; + __le32 hwsch_tqm_invalid_status; + __le32 missed_tqm_gen_mpdus; + __le32 tqm_active_tids; + __le32 tqm_inactive_tids; + __le32 tqm_active_msduq_flows; + __le32 msduq_timestamp_updates; + __le32 msduq_updates_mpdu_head_info_cmd; + __le32 msduq_updates_emp_to_nonemp_status; + __le32 get_mpdu_head_info_cmds_by_query; + __le32 get_mpdu_head_info_cmds_by_tac; + __le32 gen_mpdu_cmds_by_query; + __le32 high_prio_q_not_empty; +} __packed; + +struct ath12k_htt_tx_tqm_error_stats_tlv { + __le32 q_empty_failure; + __le32 q_not_empty_failure; + __le32 add_msdu_failure; + __le32 tqm_cache_ctl_err; + __le32 tqm_soft_reset; + __le32 tqm_reset_num_in_use_link_descs; + __le32 tqm_reset_num_lost_link_descs; + __le32 tqm_reset_num_lost_host_tx_buf_cnt; + __le32 tqm_reset_num_in_use_internal_tqm; + __le32 tqm_reset_num_in_use_idle_link_rng; + __le32 tqm_reset_time_to_tqm_hang_delta_ms; + __le32 tqm_reset_recovery_time_ms; + __le32 tqm_reset_num_peers_hdl; + __le32 tqm_reset_cumm_dirty_hw_mpduq_cnt; + __le32 tqm_reset_cumm_dirty_hw_msduq_proc; + __le32 tqm_reset_flush_cache_cmd_su_cnt; + __le32 tqm_reset_flush_cache_cmd_other_cnt; + __le32 tqm_reset_flush_cache_cmd_trig_type; + __le32 tqm_reset_flush_cache_cmd_trig_cfg; + __le32 tqm_reset_flush_cmd_skp_status_null; +} __packed; + +struct ath12k_htt_tx_tqm_gen_mpdu_stats_tlv { + DECLARE_FLEX_ARRAY(__le32, gen_mpdu_end_reason); +} __packed; + +#define ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_END_REASON 16 +#define ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS 16 + +struct ath12k_htt_tx_tqm_list_mpdu_stats_tlv { + DECLARE_FLEX_ARRAY(__le32, list_mpdu_end_reason); +} __packed; + +struct ath12k_htt_tx_tqm_list_mpdu_cnt_tlv { + DECLARE_FLEX_ARRAY(__le32, list_mpdu_cnt_hist); +} __packed; + +struct ath12k_htt_tx_tqm_pdev_stats_tlv { + __le32 msdu_count; + __le32 mpdu_count; + __le32 remove_msdu; + __le32 remove_mpdu; + __le32 remove_msdu_ttl; + __le32 send_bar; + __le32 bar_sync; + __le32 notify_mpdu; + __le32 sync_cmd; + __le32 write_cmd; + __le32 hwsch_trigger; + __le32 ack_tlv_proc; + __le32 gen_mpdu_cmd; + __le32 gen_list_cmd; + __le32 remove_mpdu_cmd; + __le32 remove_mpdu_tried_cmd; + __le32 mpdu_queue_stats_cmd; + __le32 mpdu_head_info_cmd; + __le32 msdu_flow_stats_cmd; + __le32 remove_msdu_cmd; + __le32 remove_msdu_ttl_cmd; + __le32 flush_cache_cmd; + __le32 update_mpduq_cmd; + __le32 enqueue; + __le32 enqueue_notify; + __le32 notify_mpdu_at_head; + __le32 notify_mpdu_state_valid; + __le32 sched_udp_notify1; + __le32 sched_udp_notify2; + __le32 sched_nonudp_notify1; + __le32 sched_nonudp_notify2; +} __packed; + +struct ath12k_htt_tx_de_cmn_stats_tlv { + __le32 mac_id__word; + __le32 tcl2fw_entry_count; + __le32 not_to_fw; + __le32 invalid_pdev_vdev_peer; + __le32 tcl_res_invalid_addrx; + __le32 wbm2fw_entry_count; + __le32 invalid_pdev; + __le32 tcl_res_addrx_timeout; + __le32 invalid_vdev; + __le32 invalid_tcl_exp_frame_desc; + __le32 vdev_id_mismatch_cnt; +} __packed; + +struct ath12k_htt_tx_de_eapol_packets_stats_tlv { + __le32 m1_packets; + __le32 m2_packets; + __le32 m3_packets; + __le32 m4_packets; + __le32 g1_packets; + __le32 g2_packets; + __le32 rc4_packets; + __le32 eap_packets; + __le32 eapol_start_packets; + __le32 eapol_logoff_packets; + __le32 eapol_encap_asf_packets; +} __packed; + +struct ath12k_htt_tx_de_classify_stats_tlv { + __le32 arp_packets; + __le32 
igmp_packets; + __le32 dhcp_packets; + __le32 host_inspected; + __le32 htt_included; + __le32 htt_valid_mcs; + __le32 htt_valid_nss; + __le32 htt_valid_preamble_type; + __le32 htt_valid_chainmask; + __le32 htt_valid_guard_interval; + __le32 htt_valid_retries; + __le32 htt_valid_bw_info; + __le32 htt_valid_power; + __le32 htt_valid_key_flags; + __le32 htt_valid_no_encryption; + __le32 fse_entry_count; + __le32 fse_priority_be; + __le32 fse_priority_high; + __le32 fse_priority_low; + __le32 fse_traffic_ptrn_be; + __le32 fse_traffic_ptrn_over_sub; + __le32 fse_traffic_ptrn_bursty; + __le32 fse_traffic_ptrn_interactive; + __le32 fse_traffic_ptrn_periodic; + __le32 fse_hwqueue_alloc; + __le32 fse_hwqueue_created; + __le32 fse_hwqueue_send_to_host; + __le32 mcast_entry; + __le32 bcast_entry; + __le32 htt_update_peer_cache; + __le32 htt_learning_frame; + __le32 fse_invalid_peer; + __le32 mec_notify; +} __packed; + +struct ath12k_htt_tx_de_classify_failed_stats_tlv { + __le32 ap_bss_peer_not_found; + __le32 ap_bcast_mcast_no_peer; + __le32 sta_delete_in_progress; + __le32 ibss_no_bss_peer; + __le32 invalid_vdev_type; + __le32 invalid_ast_peer_entry; + __le32 peer_entry_invalid; + __le32 ethertype_not_ip; + __le32 eapol_lookup_failed; + __le32 qpeer_not_allow_data; + __le32 fse_tid_override; + __le32 ipv6_jumbogram_zero_length; + __le32 qos_to_non_qos_in_prog; + __le32 ap_bcast_mcast_eapol; + __le32 unicast_on_ap_bss_peer; + __le32 ap_vdev_invalid; + __le32 incomplete_llc; + __le32 eapol_duplicate_m3; + __le32 eapol_duplicate_m4; +} __packed; + +struct ath12k_htt_tx_de_classify_status_stats_tlv { + __le32 eok; + __le32 classify_done; + __le32 lookup_failed; + __le32 send_host_dhcp; + __le32 send_host_mcast; + __le32 send_host_unknown_dest; + __le32 send_host; + __le32 status_invalid; +} __packed; + +struct ath12k_htt_tx_de_enqueue_packets_stats_tlv { + __le32 enqueued_pkts; + __le32 to_tqm; + __le32 to_tqm_bypass; +} __packed; + +struct ath12k_htt_tx_de_enqueue_discard_stats_tlv { + __le32 discarded_pkts; + __le32 local_frames; + __le32 is_ext_msdu; +} __packed; + +struct ath12k_htt_tx_de_compl_stats_tlv { + __le32 tcl_dummy_frame; + __le32 tqm_dummy_frame; + __le32 tqm_notify_frame; + __le32 fw2wbm_enq; + __le32 tqm_bypass_frame; +} __packed; + +enum ath12k_htt_tx_mumimo_grp_invalid_reason_code_stats { + ATH12K_HTT_TX_MUMIMO_GRP_VALID, + ATH12K_HTT_TX_MUMIMO_GRP_INVALID_NUM_MU_USERS_EXCEEDED_MU_MAX_USERS, + ATH12K_HTT_TX_MUMIMO_GRP_INVALID_SCHED_ALGO_NOT_MU_COMPATIBLE_GID, + ATH12K_HTT_TX_MUMIMO_GRP_INVALID_NON_PRIMARY_GRP, + ATH12K_HTT_TX_MUMIMO_GRP_INVALID_ZERO_CANDIDATES, + ATH12K_HTT_TX_MUMIMO_GRP_INVALID_MORE_CANDIDATES, + ATH12K_HTT_TX_MUMIMO_GRP_INVALID_GROUP_SIZE_EXCEED_NSS, + ATH12K_HTT_TX_MUMIMO_GRP_INVALID_GROUP_INELIGIBLE, + ATH12K_HTT_TX_MUMIMO_GRP_INVALID, + ATH12K_HTT_TX_MUMIMO_GRP_INVALID_GROUP_EFF_MU_TPUT_OMBPS, + ATH12K_HTT_TX_MUMIMO_GRP_INVALID_MAX_REASON_CODE, +}; + +#define ATH12K_HTT_NUM_AC_WMM 0x4 +#define ATH12K_HTT_MAX_NUM_SBT_INTR 4 +#define ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS 4 +#define ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS 8 +#define ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS 8 +#define ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS 7 +#define ATH12K_HTT_TX_NUM_OFDMA_USER_STATS 74 +#define ATH12K_HTT_TX_NUM_UL_MUMIMO_USER_STATS 8 +#define ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ 8 +#define ATH12K_HTT_STATS_MUMIMO_TPUT_NUM_BINS 10 + +#define ATH12K_HTT_STATS_MAX_INVALID_REASON_CODE \ + ATH12K_HTT_TX_MUMIMO_GRP_INVALID_MAX_REASON_CODE +#define 
+
+struct ath12k_htt_tx_selfgen_cmn_stats_tlv {
+	__le32 mac_id__word;
+	__le32 su_bar;
+	__le32 rts;
+	__le32 cts2self;
+	__le32 qos_null;
+	__le32 delayed_bar_1;
+	__le32 delayed_bar_2;
+	__le32 delayed_bar_3;
+	__le32 delayed_bar_4;
+	__le32 delayed_bar_5;
+	__le32 delayed_bar_6;
+	__le32 delayed_bar_7;
+} __packed;
+
+struct ath12k_htt_tx_selfgen_ac_stats_tlv {
+	__le32 ac_su_ndpa;
+	__le32 ac_su_ndp;
+	__le32 ac_mu_mimo_ndpa;
+	__le32 ac_mu_mimo_ndp;
+	__le32 ac_mu_mimo_brpoll[ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS - 1];
+} __packed;
+
+struct ath12k_htt_tx_selfgen_ax_stats_tlv {
+	__le32 ax_su_ndpa;
+	__le32 ax_su_ndp;
+	__le32 ax_mu_mimo_ndpa;
+	__le32 ax_mu_mimo_ndp;
+	__le32 ax_mu_mimo_brpoll[ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS - 1];
+	__le32 ax_basic_trigger;
+	__le32 ax_bsr_trigger;
+	__le32 ax_mu_bar_trigger;
+	__le32 ax_mu_rts_trigger;
+	__le32 ax_ulmumimo_trigger;
+} __packed;
+
+struct ath12k_htt_tx_selfgen_be_stats_tlv {
+	__le32 be_su_ndpa;
+	__le32 be_su_ndp;
+	__le32 be_mu_mimo_ndpa;
+	__le32 be_mu_mimo_ndp;
+	__le32 be_mu_mimo_brpoll[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1];
+	__le32 be_basic_trigger;
+	__le32 be_bsr_trigger;
+	__le32 be_mu_bar_trigger;
+	__le32 be_mu_rts_trigger;
+	__le32 be_ulmumimo_trigger;
+	__le32 be_su_ndpa_queued;
+	__le32 be_su_ndp_queued;
+	__le32 be_mu_mimo_ndpa_queued;
+	__le32 be_mu_mimo_ndp_queued;
+	__le32 be_mu_mimo_brpoll_queued[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1];
+	__le32 be_ul_mumimo_trigger[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS];
+} __packed;
+
+struct ath12k_htt_tx_selfgen_ac_err_stats_tlv {
+	__le32 ac_su_ndp_err;
+	__le32 ac_su_ndpa_err;
+	__le32 ac_mu_mimo_ndpa_err;
+	__le32 ac_mu_mimo_ndp_err;
+	__le32 ac_mu_mimo_brp1_err;
+	__le32 ac_mu_mimo_brp2_err;
+	__le32 ac_mu_mimo_brp3_err;
+} __packed;
+
+struct ath12k_htt_tx_selfgen_ax_err_stats_tlv {
+	__le32 ax_su_ndp_err;
+	__le32 ax_su_ndpa_err;
+	__le32 ax_mu_mimo_ndpa_err;
+	__le32 ax_mu_mimo_ndp_err;
+	__le32 ax_mu_mimo_brp_err[ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS - 1];
+	__le32 ax_basic_trigger_err;
+	__le32 ax_bsr_trigger_err;
+	__le32 ax_mu_bar_trigger_err;
+	__le32 ax_mu_rts_trigger_err;
+	__le32 ax_ulmumimo_trigger_err;
+} __packed;
+
+struct ath12k_htt_tx_selfgen_be_err_stats_tlv {
+	__le32 be_su_ndp_err;
+	__le32 be_su_ndpa_err;
+	__le32 be_mu_mimo_ndpa_err;
+	__le32 be_mu_mimo_ndp_err;
+	__le32 be_mu_mimo_brp_err[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1];
+	__le32 be_basic_trigger_err;
+	__le32 be_bsr_trigger_err;
+	__le32 be_mu_bar_trigger_err;
+	__le32 be_mu_rts_trigger_err;
+	__le32 be_ulmumimo_trigger_err;
+	__le32 be_mu_mimo_brp_err_num_cbf_rxd[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS];
+	__le32 be_su_ndpa_flushed;
+	__le32 be_su_ndp_flushed;
+	__le32 be_mu_mimo_ndpa_flushed;
+	__le32 be_mu_mimo_ndp_flushed;
+	__le32 be_mu_mimo_brpoll_flushed[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1];
+	__le32 be_ul_mumimo_trigger_err[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS];
+} __packed;
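The *_brpoll[] arrays above hold one slot fewer than the user-stats maximum; the AC error struct (brp1/brp2/brp3 for a maximum of 4 users) suggests slot i corresponds to BR-POLL ordinal i+1. A sketch of printing them under that assumption, in standalone userspace C:

#include <stdio.h>
#include <stdint.h>

#define AC_MUMIMO_USER_STATS 4 /* ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS */

static void dump_ac_brpoll(const uint32_t brpoll[AC_MUMIMO_USER_STATS - 1])
{
	unsigned int i;

	/* slot 0 is reported as brpoll1, matching the brp1/brp2/brp3 naming */
	for (i = 0; i < AC_MUMIMO_USER_STATS - 1; i++)
		printf("ac_mu_mimo_brpoll%u = %u\n", i + 1, brpoll[i]);
}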
+
+enum ath12k_htt_tx_selfgen_sch_tsflag_error_stats {
+	ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_FLUSH_RCVD_ERR,
+	ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_FILT_SCHED_CMD_ERR,
+	ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_RESP_MISMATCH_ERR,
+	ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_RESP_CBF_MIMO_CTRL_MISMATCH_ERR,
+	ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_RESP_CBF_BW_MISMATCH_ERR,
+	ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_RETRY_COUNT_FAIL_ERR,
+	ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_RESP_TOO_LATE_RECEIVED_ERR,
+	ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_SIFS_STALL_NO_NEXT_CMD_ERR,
+
+	ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS
+};
+
+struct ath12k_htt_tx_selfgen_ac_sched_status_stats_tlv {
+	__le32 ac_su_ndpa_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	__le32 ac_su_ndp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	__le32 ac_su_ndp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+	__le32 ac_mu_mimo_ndpa_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	__le32 ac_mu_mimo_ndp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	__le32 ac_mu_mimo_ndp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+	__le32 ac_mu_mimo_brp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	__le32 ac_mu_mimo_brp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+} __packed;
+
+struct ath12k_htt_tx_selfgen_ax_sched_status_stats_tlv {
+	__le32 ax_su_ndpa_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	__le32 ax_su_ndp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	__le32 ax_su_ndp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+	__le32 ax_mu_mimo_ndpa_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	__le32 ax_mu_mimo_ndp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	__le32 ax_mu_mimo_ndp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+	__le32 ax_mu_brp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	__le32 ax_mu_brp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+	__le32 ax_mu_bar_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	__le32 ax_mu_bar_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+	__le32 ax_basic_trig_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	__le32 ax_basic_trig_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+	__le32 ax_ulmumimo_trig_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	__le32 ax_ulmumimo_trig_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+} __packed;
+
+struct ath12k_htt_tx_selfgen_be_sched_status_stats_tlv {
+	__le32 be_su_ndpa_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	__le32 be_su_ndp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	__le32 be_su_ndp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+	__le32 be_mu_mimo_ndpa_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	__le32 be_mu_mimo_ndp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	__le32 be_mu_mimo_ndp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+	__le32 be_mu_brp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	__le32 be_mu_brp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+	__le32 be_mu_bar_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	__le32 be_mu_bar_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+	__le32 be_basic_trig_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	__le32 be_basic_trig_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+	__le32 be_ulmumimo_trig_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	__le32 be_ulmumimo_trig_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
+} __packed;
+
+struct ath12k_htt_stats_string_tlv {
+	DECLARE_FLEX_ARRAY(__le32, data);
+} __packed;
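The string TLV above is an opaque dword array. Assuming the firmware packs ASCII bytes into little-endian dwords in byte order, as HTT string stats typically do, a reader can recover the text with a plain byte copy; this is a hedged userspace sketch, not the driver's parser.

#include <stdio.h>
#include <string.h>

static void print_htt_string(const void *data, size_t num_words)
{
	char buf[128] = "";
	size_t n = num_words * 4;

	if (n > sizeof(buf) - 1)
		n = sizeof(buf) - 1;

	/* LE dwords already carry the characters in byte order */
	memcpy(buf, data, n);
	printf("%s\n", buf);
}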
+
+#define ATH12K_HTT_SRING_STATS_MAC_ID GENMASK(7, 0)
+#define ATH12K_HTT_SRING_STATS_RING_ID GENMASK(15, 8)
+#define ATH12K_HTT_SRING_STATS_ARENA GENMASK(23, 16)
+#define ATH12K_HTT_SRING_STATS_EP BIT(24)
+#define ATH12K_HTT_SRING_STATS_NUM_AVAIL_WORDS GENMASK(15, 0)
+#define ATH12K_HTT_SRING_STATS_NUM_VALID_WORDS GENMASK(31, 16)
+#define ATH12K_HTT_SRING_STATS_HEAD_PTR GENMASK(15, 0)
+#define ATH12K_HTT_SRING_STATS_TAIL_PTR GENMASK(31, 16)
+#define ATH12K_HTT_SRING_STATS_CONSUMER_EMPTY GENMASK(15, 0)
+#define ATH12K_HTT_SRING_STATS_PRODUCER_FULL GENMASK(31, 16)
+#define ATH12K_HTT_SRING_STATS_PREFETCH_COUNT GENMASK(15, 0)
+#define ATH12K_HTT_SRING_STATS_INTERNAL_TAIL_PTR GENMASK(31, 16)
+
+struct ath12k_htt_sring_stats_tlv {
+	__le32 mac_id__ring_id__arena__ep;
+	__le32 base_addr_lsb;
+	__le32 base_addr_msb;
+	__le32 ring_size;
+	__le32 elem_size;
+	__le32 num_avail_words__num_valid_words;
+	__le32 head_ptr__tail_ptr;
+	__le32 consumer_empty__producer_full;
+	__le32 prefetch_count__internal_tail_ptr;
+} __packed;
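In the driver these packed words are decoded with le32_get_bits() and the GENMASK()s above; the same layout written out with explicit shifts, as a standalone userspace sketch:

#include <stdint.h>
#include <stdio.h>
#include <endian.h>

static void dump_sring_ids(uint32_t mac_id__ring_id__arena__ep_le)
{
	uint32_t v = le32toh(mac_id__ring_id__arena__ep_le);

	printf("mac_id %u ring_id %u arena %u ep %u\n",
	       v & 0xff,		/* ATH12K_HTT_SRING_STATS_MAC_ID */
	       (v >> 8) & 0xff,		/* ATH12K_HTT_SRING_STATS_RING_ID */
	       (v >> 16) & 0xff,	/* ATH12K_HTT_SRING_STATS_ARENA */
	       (v >> 24) & 0x1);	/* ATH12K_HTT_SRING_STATS_EP, BIT(24) */
}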
+
+struct ath12k_htt_sfm_cmn_tlv {
+	__le32 mac_id__word;
+	__le32 buf_total;
+	__le32 mem_empty;
+	__le32 deallocate_bufs;
+	__le32 num_records;
+} __packed;
+
+struct ath12k_htt_sfm_client_tlv {
+	__le32 client_id;
+	__le32 buf_min;
+	__le32 buf_max;
+	__le32 buf_busy;
+	__le32 buf_alloc;
+	__le32 buf_avail;
+	__le32 num_users;
+} __packed;
+
+struct ath12k_htt_sfm_client_user_tlv {
+	DECLARE_FLEX_ARRAY(__le32, dwords_used_by_user_n);
+} __packed;
+
+struct ath12k_htt_tx_pdev_mu_mimo_sch_stats_tlv {
+	__le32 mu_mimo_sch_posted;
+	__le32 mu_mimo_sch_failed;
+	__le32 mu_mimo_ppdu_posted;
+	__le32 ac_mu_mimo_sch_nusers[ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS];
+	__le32 ax_mu_mimo_sch_nusers[ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS];
+	__le32 ax_ofdma_sch_nusers[ATH12K_HTT_TX_NUM_OFDMA_USER_STATS];
+	__le32 ax_ul_ofdma_nusers[ATH12K_HTT_TX_NUM_OFDMA_USER_STATS];
+	__le32 ax_ul_ofdma_bsr_nusers[ATH12K_HTT_TX_NUM_OFDMA_USER_STATS];
+	__le32 ax_ul_ofdma_bar_nusers[ATH12K_HTT_TX_NUM_OFDMA_USER_STATS];
+	__le32 ax_ul_ofdma_brp_nusers[ATH12K_HTT_TX_NUM_OFDMA_USER_STATS];
+	__le32 ax_ul_mumimo_nusers[ATH12K_HTT_TX_NUM_UL_MUMIMO_USER_STATS];
+	__le32 ax_ul_mumimo_brp_nusers[ATH12K_HTT_TX_NUM_UL_MUMIMO_USER_STATS];
+	__le32 ac_mu_mimo_per_grp_sz[ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS];
+	__le32 ax_mu_mimo_per_grp_sz[ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS];
+	__le32 be_mu_mimo_sch_nusers[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS];
+	__le32 be_mu_mimo_per_grp_sz[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS];
+	__le32 ac_mu_mimo_grp_sz_ext[ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS];
+} __packed;
+
+struct ath12k_htt_tx_pdev_mumimo_grp_stats_tlv {
+	__le32 dl_mumimo_grp_best_grp_size[ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ];
+	__le32 dl_mumimo_grp_best_num_usrs[ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS];
+	__le32 dl_mumimo_grp_eligible[ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ];
+	__le32 dl_mumimo_grp_ineligible[ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ];
+	__le32 dl_mumimo_grp_invalid[ATH12K_HTT_TX_NUM_MUMIMO_GRP_INVALID_WORDS];
+	__le32 dl_mumimo_grp_tputs[ATH12K_HTT_STATS_MUMIMO_TPUT_NUM_BINS];
+	__le32 ul_mumimo_grp_best_grp_size[ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ];
+	__le32 ul_mumimo_grp_best_usrs[ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS];
+	__le32 ul_mumimo_grp_tputs[ATH12K_HTT_STATS_MUMIMO_TPUT_NUM_BINS];
+} __packed;
+
+enum ath12k_htt_stats_tx_sched_modes {
+	ATH12K_HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC = 0,
+	ATH12K_HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX,
+	ATH12K_HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX,
+	ATH12K_HTT_STATS_TX_SCHED_MODE_MU_OFDMA_BE,
+	ATH12K_HTT_STATS_TX_SCHED_MODE_MU_MIMO_BE
+};
+
+struct ath12k_htt_tx_pdev_mpdu_stats_tlv {
+	__le32 mpdus_queued_usr;
+	__le32 mpdus_tried_usr;
+	__le32 mpdus_failed_usr;
+	__le32 mpdus_requeued_usr;
+	__le32 err_no_ba_usr;
+	__le32 mpdu_underrun_usr;
+	__le32 ampdu_underrun_usr;
+	__le32 user_index;
+	__le32 tx_sched_mode;
+} __packed;
+
+struct ath12k_htt_pdev_stats_cca_counters_tlv {
+	__le32 tx_frame_usec;
+	__le32 rx_frame_usec;
+	__le32 rx_clear_usec;
+	__le32 my_rx_frame_usec;
+	__le32 usec_cnt;
+	__le32 med_rx_idle_usec;
+	__le32 med_tx_idle_global_usec;
+	__le32 cca_obss_usec;
+} __packed;
+
+struct ath12k_htt_pdev_cca_stats_hist_v1_tlv {
+	__le32 chan_num;
+	__le32 num_records;
+	__le32 valid_cca_counters_bitmap;
+	__le32 collection_interval;
+} __packed;
+
+struct ath12k_htt_pdev_obss_pd_stats_tlv {
+	__le32 num_obss_tx_ppdu_success;
+	__le32 num_obss_tx_ppdu_failure;
+	__le32 num_sr_tx_transmissions;
+	__le32 num_spatial_reuse_opportunities;
+	__le32 num_non_srg_opportunities;
+	__le32 num_non_srg_ppdu_tried;
+	__le32 num_non_srg_ppdu_success;
+	__le32 num_srg_opportunities;
+	__le32 num_srg_ppdu_tried;
+	__le32 num_srg_ppdu_success;
+	__le32 num_psr_opportunities;
+	__le32 num_psr_ppdu_tried;
+	__le32 num_psr_ppdu_success;
+	__le32 num_non_srg_tried_per_ac[ATH12K_HTT_NUM_AC_WMM];
+	__le32 num_non_srg_success_ac[ATH12K_HTT_NUM_AC_WMM];
+	__le32 num_srg_tried_per_ac[ATH12K_HTT_NUM_AC_WMM];
+	__le32 num_srg_success_per_ac[ATH12K_HTT_NUM_AC_WMM];
+	__le32 num_obss_min_dur_check_flush_cnt;
+	__le32 num_sr_ppdu_abort_flush_cnt;
+} __packed;
+
+#define ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS 14
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+#define ATH12K_HTT_TXBF_NUM_BW_CNTRS 5
+#define ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES 2
+
+struct ath12k_htt_pdev_txrate_txbf_stats_tlv {
+	__le32 tx_su_txbf_mcs[ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS];
+	__le32 tx_su_ibf_mcs[ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS];
+	__le32 tx_su_ol_mcs[ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS];
+	__le32 tx_su_txbf_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	__le32 tx_su_ibf_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	__le32 tx_su_ol_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	__le32 tx_su_txbf_bw[ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+	__le32 tx_su_ibf_bw[ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+	__le32 tx_su_ol_bw[ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+	__le32 tx_legacy_ofdm_rate[ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+	__le32 txbf[ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+	__le32 ibf[ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+	__le32 ol[ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+	__le32 txbf_flag_set_mu_mode;
+	__le32 txbf_flag_set_final_status;
+	__le32 txbf_flag_not_set_verified_txbf_mode;
+	__le32 txbf_flag_not_set_disable_p2p_access;
+	__le32 txbf_flag_not_set_max_nss_in_he160;
+	__le32 txbf_flag_not_set_disable_uldlofdma;
+	__le32 txbf_flag_not_set_mcs_threshold_val;
+	__le32 txbf_flag_not_set_final_status;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_elem_t {
+	__le32 ax_ofdma_ndpa_queued;
+	__le32 ax_ofdma_ndpa_tried;
+	__le32 ax_ofdma_ndpa_flush;
+	__le32 ax_ofdma_ndpa_err;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_tlv {
+	__le32 num_elems_ax_ndpa_arr;
+	__le32 arr_elem_size_ax_ndpa;
+	DECLARE_FLEX_ARRAY(struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_elem_t, ax_ndpa);
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_ndp_stats_elem_t {
+	__le32 ax_ofdma_ndp_queued;
+	__le32 ax_ofdma_ndp_tried;
+	__le32 ax_ofdma_ndp_flush;
+	__le32 ax_ofdma_ndp_err;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_ndp_stats_tlv {
+	__le32 num_elems_ax_ndp_arr;
+	__le32 arr_elem_size_ax_ndp;
+	DECLARE_FLEX_ARRAY(struct ath12k_htt_txbf_ofdma_ax_ndp_stats_elem_t, ax_ndp);
+} __packed;
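The TXBF OFDMA TLVs above prefix their element arrays with num_elems and arr_elem_size. Stepping by the advertised element size rather than sizeof(struct) keeps a parser tolerant of firmware that grows the element later. A sketch under the assumption that payload_len covers the whole TLV body:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <endian.h>

struct ax_ndpa_elem {	/* mirrors ath12k_htt_txbf_ofdma_ax_ndpa_stats_elem_t */
	uint32_t queued, tried, flush, err;
};

static void walk_ax_ndpa(const uint8_t *body, size_t payload_len)
{
	uint32_t num, stride, i;

	if (payload_len < 8)
		return;
	memcpy(&num, body, 4);
	memcpy(&stride, body + 4, 4);
	num = le32toh(num);
	stride = le32toh(stride);
	if (stride < sizeof(struct ax_ndpa_elem))
		return;

	for (i = 0; i < num; i++) {
		const uint8_t *p = body + 8 + (size_t)i * stride;
		struct ax_ndpa_elem e;

		if (p + sizeof(e) > body + payload_len)
			break;	/* firmware claimed more elements than fit */
		memcpy(&e, p, sizeof(e));
		printf("ndpa[%u]: queued %u tried %u flush %u err %u\n", i,
		       le32toh(e.queued), le32toh(e.tried),
		       le32toh(e.flush), le32toh(e.err));
	}
}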
+
+struct ath12k_htt_txbf_ofdma_ax_brp_stats_elem_t {
+	__le32 ax_ofdma_brp_queued;
+	__le32 ax_ofdma_brp_tried;
+	__le32 ax_ofdma_brp_flushed;
+	__le32 ax_ofdma_brp_err;
+	__le32 ax_ofdma_num_cbf_rcvd;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_brp_stats_tlv {
+	__le32 num_elems_ax_brp_arr;
+	__le32 arr_elem_size_ax_brp;
+	DECLARE_FLEX_ARRAY(struct ath12k_htt_txbf_ofdma_ax_brp_stats_elem_t, ax_brp);
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_steer_stats_elem_t {
+	__le32 num_ppdu_steer;
+	__le32 num_ppdu_ol;
+	__le32 num_usr_prefetch;
+	__le32 num_usr_sound;
+	__le32 num_usr_force_sound;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_steer_stats_tlv {
+	__le32 num_elems_ax_steer_arr;
+	__le32 arr_elem_size_ax_steer;
+	DECLARE_FLEX_ARRAY(struct ath12k_htt_txbf_ofdma_ax_steer_stats_elem_t, ax_steer);
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_steer_mpdu_stats_tlv {
+	__le32 ax_ofdma_rbo_steer_mpdus_tried;
+	__le32 ax_ofdma_rbo_steer_mpdus_failed;
+	__le32 ax_ofdma_sifs_steer_mpdus_tried;
+	__le32 ax_ofdma_sifs_steer_mpdus_failed;
+} __packed;
+
+enum ath12k_htt_stats_page_lock_state {
+	ATH12K_HTT_STATS_PAGE_LOCKED = 0,
+	ATH12K_HTT_STATS_PAGE_UNLOCKED = 1,
+	ATH12K_NUM_PG_LOCK_STATE
+};
+
+#define ATH12K_PAGER_MAX 10
+
+#define ATH12K_HTT_DLPAGER_ASYNC_LOCK_PG_CNT_INFO0 GENMASK(7, 0)
+#define ATH12K_HTT_DLPAGER_SYNC_LOCK_PG_CNT_INFO0 GENMASK(15, 8)
+#define ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO1 GENMASK(15, 0)
+#define ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO1 GENMASK(31, 16)
+#define ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO2 GENMASK(15, 0)
+#define ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO2 GENMASK(31, 16)
+
+struct ath12k_htt_pgs_info {
+	__le32 page_num;
+	__le32 num_pgs;
+	__le32 ts_lsb;
+	__le32 ts_msb;
+} __packed;
+
+struct ath12k_htt_dl_pager_stats_tlv {
+	__le32 info0;
+	__le32 info1;
+	__le32 info2;
+	struct ath12k_htt_pgs_info pgs_info[ATH12K_NUM_PG_LOCK_STATE][ATH12K_PAGER_MAX];
+} __packed;
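A sketch of decoding the DL pager header words above with the ATH12K_HTT_DLPAGER_* mask positions, and of joining the split 32-bit timestamp halves from ath12k_htt_pgs_info; standalone userspace C:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>
#include <endian.h>

static void dump_dl_pager_hdr(uint32_t info0_le, uint32_t ts_lsb_le,
			      uint32_t ts_msb_le)
{
	uint32_t info0 = le32toh(info0_le);
	uint64_t ts = ((uint64_t)le32toh(ts_msb_le) << 32) | le32toh(ts_lsb_le);

	printf("async locked pages %u, sync locked pages %u, ts %" PRIu64 "\n",
	       info0 & 0xff,		/* ..._ASYNC_LOCK_PG_CNT_INFO0 */
	       (info0 >> 8) & 0xff,	/* ..._SYNC_LOCK_PG_CNT_INFO0 */
	       ts);
}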
+
+#define ATH12K_HTT_STATS_MAX_CHAINS 8
+#define ATH12K_HTT_MAX_RX_PKT_CNT 8
+#define ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT 8
+#define ATH12K_HTT_MAX_PER_BLK_ERR_CNT 20
+#define ATH12K_HTT_MAX_RX_OTA_ERR_CNT 14
+#define ATH12K_HTT_MAX_CH_PWR_INFO_SIZE 16
+
+struct ath12k_htt_phy_stats_tlv {
+	a_sle32 nf_chain[ATH12K_HTT_STATS_MAX_CHAINS];
+	__le32 false_radar_cnt;
+	__le32 radar_cs_cnt;
+	a_sle32 ani_level;
+	__le32 fw_run_time;
+	a_sle32 runtime_nf_chain[ATH12K_HTT_STATS_MAX_CHAINS];
+} __packed;
+
+struct ath12k_htt_phy_counters_tlv {
+	__le32 rx_ofdma_timing_err_cnt;
+	__le32 rx_cck_fail_cnt;
+	__le32 mactx_abort_cnt;
+	__le32 macrx_abort_cnt;
+	__le32 phytx_abort_cnt;
+	__le32 phyrx_abort_cnt;
+	__le32 phyrx_defer_abort_cnt;
+	__le32 rx_gain_adj_lstf_event_cnt;
+	__le32 rx_gain_adj_non_legacy_cnt;
+	__le32 rx_pkt_cnt[ATH12K_HTT_MAX_RX_PKT_CNT];
+	__le32 rx_pkt_crc_pass_cnt[ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT];
+	__le32 per_blk_err_cnt[ATH12K_HTT_MAX_PER_BLK_ERR_CNT];
+	__le32 rx_ota_err_cnt[ATH12K_HTT_MAX_RX_OTA_ERR_CNT];
+} __packed;
+
+struct ath12k_htt_phy_reset_stats_tlv {
+	__le32 pdev_id;
+	__le32 chan_mhz;
+	__le32 chan_band_center_freq1;
+	__le32 chan_band_center_freq2;
+	__le32 chan_phy_mode;
+	__le32 chan_flags;
+	__le32 chan_num;
+	__le32 reset_cause;
+	__le32 prev_reset_cause;
+	__le32 phy_warm_reset_src;
+	__le32 rx_gain_tbl_mode;
+	__le32 xbar_val;
+	__le32 force_calibration;
+	__le32 phyrf_mode;
+	__le32 phy_homechan;
+	__le32 phy_tx_ch_mask;
+	__le32 phy_rx_ch_mask;
+	__le32 phybb_ini_mask;
+	__le32 phyrf_ini_mask;
+	__le32 phy_dfs_en_mask;
+	__le32 phy_sscan_en_mask;
+	__le32 phy_synth_sel_mask;
+	__le32 phy_adfs_freq;
+	__le32 cck_fir_settings;
+	__le32 phy_dyn_pri_chan;
+	__le32 cca_thresh;
+	__le32 dyn_cca_status;
+	__le32 rxdesense_thresh_hw;
+	__le32 rxdesense_thresh_sw;
+} __packed;
+
+struct ath12k_htt_phy_reset_counters_tlv {
+	__le32 pdev_id;
+	__le32 cf_active_low_fail_cnt;
+	__le32 cf_active_low_pass_cnt;
+	__le32 phy_off_through_vreg_cnt;
+	__le32 force_calibration_cnt;
+	__le32 rf_mode_switch_phy_off_cnt;
+	__le32 temperature_recal_cnt;
+} __packed;
+
+struct ath12k_htt_phy_tpc_stats_tlv {
+	__le32 pdev_id;
+	__le32 tx_power_scale;
+	__le32 tx_power_scale_db;
+	__le32 min_negative_tx_power;
+	__le32 reg_ctl_domain;
+	__le32 max_reg_allowed_power[ATH12K_HTT_STATS_MAX_CHAINS];
+	__le32 max_reg_allowed_power_6ghz[ATH12K_HTT_STATS_MAX_CHAINS];
+	__le32 twice_max_rd_power;
+	__le32 max_tx_power;
+	__le32 home_max_tx_power;
+	__le32 psd_power;
+	__le32 eirp_power;
+	__le32 power_type_6ghz;
+	__le32 sub_band_cfreq[ATH12K_HTT_MAX_CH_PWR_INFO_SIZE];
+	__le32 sub_band_txpower[ATH12K_HTT_MAX_CH_PWR_INFO_SIZE];
+} __packed;
+
+struct ath12k_htt_t2h_soc_txrx_stats_common_tlv {
+	__le32 inv_peers_msdu_drop_count_hi;
+	__le32 inv_peers_msdu_drop_count_lo;
+} __packed;
+
+#define ATH12K_HTT_AST_PDEV_ID_INFO GENMASK(1, 0)
+#define ATH12K_HTT_AST_VDEV_ID_INFO GENMASK(9, 2)
+#define ATH12K_HTT_AST_NEXT_HOP_INFO BIT(10)
+#define ATH12K_HTT_AST_MCAST_INFO BIT(11)
+#define ATH12K_HTT_AST_MONITOR_DIRECT_INFO BIT(12)
+#define ATH12K_HTT_AST_MESH_STA_INFO BIT(13)
+#define ATH12K_HTT_AST_MEC_INFO BIT(14)
+#define ATH12K_HTT_AST_INTRA_BSS_INFO BIT(15)
+
+struct ath12k_htt_ast_entry_tlv {
+	__le32 sw_peer_id;
+	__le32 ast_index;
+	struct htt_mac_addr mac_addr;
+	__le32 info;
+} __packed;
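Unpacking the AST entry's info word with the ATH12K_HTT_AST_* masks above, written with plain shifts so the bit positions are visible; a standalone userspace sketch:

#include <stdint.h>
#include <stdio.h>
#include <endian.h>

static void dump_ast_info(uint32_t info_le)
{
	uint32_t info = le32toh(info_le);

	printf("pdev %u vdev %u next_hop %u mcast %u mesh_sta %u mec %u intra_bss %u\n",
	       info & 0x3,		/* ATH12K_HTT_AST_PDEV_ID_INFO */
	       (info >> 2) & 0xff,	/* ATH12K_HTT_AST_VDEV_ID_INFO */
	       (info >> 10) & 1,	/* NEXT_HOP */
	       (info >> 11) & 1,	/* MCAST */
	       (info >> 13) & 1,	/* MESH_STA */
	       (info >> 14) & 1,	/* MEC */
	       (info >> 15) & 1);	/* INTRA_BSS */
}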
+
+enum ath12k_htt_stats_direction {
+	ATH12K_HTT_STATS_DIRECTION_TX,
+	ATH12K_HTT_STATS_DIRECTION_RX
+};
+
+enum ath12k_htt_stats_ppdu_type {
+	ATH12K_HTT_STATS_PPDU_TYPE_MODE_SU,
+	ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_MIMO,
+	ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_MIMO,
+	ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_OFDMA,
+	ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_OFDMA
+};
+
+enum ath12k_htt_stats_param_type {
+	ATH12K_HTT_STATS_PREAM_OFDM,
+	ATH12K_HTT_STATS_PREAM_CCK,
+	ATH12K_HTT_STATS_PREAM_HT,
+	ATH12K_HTT_STATS_PREAM_VHT,
+	ATH12K_HTT_STATS_PREAM_HE,
+	ATH12K_HTT_STATS_PREAM_EHT,
+	ATH12K_HTT_STATS_PREAM_RSVD1,
+	ATH12K_HTT_STATS_PREAM_COUNT,
+};
+
+#define ATH12K_HTT_PUNCT_STATS_MAX_SUBBAND_CNT 32
+
+struct ath12k_htt_pdev_puncture_stats_tlv {
+	__le32 mac_id__word;
+	__le32 direction;
+	__le32 preamble;
+	__le32 ppdu_type;
+	__le32 subband_cnt;
+	__le32 last_used_pattern_mask;
+	__le32 num_subbands_used_cnt[ATH12K_HTT_PUNCT_STATS_MAX_SUBBAND_CNT];
+} __packed;
+
+struct ath12k_htt_dmac_reset_stats_tlv {
+	__le32 reset_count;
+	__le32 reset_time_lo_ms;
+	__le32 reset_time_hi_ms;
+	__le32 disengage_time_lo_ms;
+	__le32 disengage_time_hi_ms;
+	__le32 engage_time_lo_ms;
+	__le32 engage_time_hi_ms;
+	__le32 disengage_count;
+	__le32 engage_count;
+	__le32 drain_dest_ring_mask;
+} __packed;
+
+struct ath12k_htt_pdev_sched_algo_ofdma_stats_tlv {
+	__le32 mac_id__word;
+	__le32 rate_based_dlofdma_enabled_cnt[ATH12K_HTT_NUM_AC_WMM];
+	__le32 rate_based_dlofdma_disabled_cnt[ATH12K_HTT_NUM_AC_WMM];
+	__le32 rate_based_dlofdma_probing_cnt[ATH12K_HTT_NUM_AC_WMM];
+	__le32 rate_based_dlofdma_monitor_cnt[ATH12K_HTT_NUM_AC_WMM];
+	__le32 chan_acc_lat_based_dlofdma_enabled_cnt[ATH12K_HTT_NUM_AC_WMM];
+	__le32 chan_acc_lat_based_dlofdma_disabled_cnt[ATH12K_HTT_NUM_AC_WMM];
+	__le32 chan_acc_lat_based_dlofdma_monitor_cnt[ATH12K_HTT_NUM_AC_WMM];
+	__le32 downgrade_to_dl_su_ru_alloc_fail[ATH12K_HTT_NUM_AC_WMM];
+	__le32 candidate_list_single_user_disable_ofdma[ATH12K_HTT_NUM_AC_WMM];
+	__le32 dl_cand_list_dropped_high_ul_qos_weight[ATH12K_HTT_NUM_AC_WMM];
+	__le32 ax_dlofdma_disabled_due_to_pipelining[ATH12K_HTT_NUM_AC_WMM];
+	__le32 dlofdma_disabled_su_only_eligible[ATH12K_HTT_NUM_AC_WMM];
+	__le32 dlofdma_disabled_consec_no_mpdus_tried[ATH12K_HTT_NUM_AC_WMM];
+	__le32 dlofdma_disabled_consec_no_mpdus_success[ATH12K_HTT_NUM_AC_WMM];
+} __packed;
+
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS 4
+#define ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS 8
+#define ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS 14
+
+enum ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE {
+	ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_26,
+	ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_52,
+	ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_52_26,
+	ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_106,
+	ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_106_26,
+	ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_242,
+	ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_484,
+	ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_484_242,
+	ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996,
+	ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996_484,
+	ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996_484_242,
+	ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x2,
+	ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x2_484,
+	ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x3,
+	ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x3_484,
+	ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x4,
+	ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS,
+};
+
+enum ATH12K_HTT_RC_MODE {
+	ATH12K_HTT_RC_MODE_SU_OL,
+	ATH12K_HTT_RC_MODE_SU_BF,
+	ATH12K_HTT_RC_MODE_MU1_INTF,
+	ATH12K_HTT_RC_MODE_MU2_INTF,
+	ATH12K_HTT_RC_MODE_MU3_INTF,
+	ATH12K_HTT_RC_MODE_MU4_INTF,
+	ATH12K_HTT_RC_MODE_MU5_INTF,
+	ATH12K_HTT_RC_MODE_MU6_INTF,
+	ATH12K_HTT_RC_MODE_MU7_INTF,
+	ATH12K_HTT_RC_MODE_2D_COUNT
+};
+
+enum ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE {
+	ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26,
+	ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52,
+	ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106,
+	ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242,
+	ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484,
+	ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996,
+	ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2,
+	ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS
+};
+
+enum ath12k_htt_stats_rc_mode {
+	ATH12K_HTT_STATS_RC_MODE_DLSU = 0,
+	ATH12K_HTT_STATS_RC_MODE_DLMUMIMO = 1,
+	ATH12K_HTT_STATS_RC_MODE_DLOFDMA = 2,
+	ATH12K_HTT_STATS_RC_MODE_ULMUMIMO = 3,
+	ATH12K_HTT_STATS_RC_MODE_ULOFDMA = 4,
+};
+
+enum ath12k_htt_stats_ru_type {
+	ATH12K_HTT_STATS_RU_TYPE_INVALID,
+	ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY,
+	ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU,
+};
+
+struct ath12k_htt_tx_rate_stats {
+	__le32 ppdus_tried;
+	__le32 ppdus_ack_failed;
+	__le32 mpdus_tried;
+	__le32 mpdus_failed;
+} __packed;
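A per-bucket PER can be derived from ath12k_htt_tx_rate_stats, assuming mpdus_failed is a subset of mpdus_tried as the naming suggests; the zero case must be guarded since idle buckets report no attempts:

#include <stdint.h>

static unsigned int mpdu_per_pct(uint32_t mpdus_tried, uint32_t mpdus_failed)
{
	if (!mpdus_tried)
		return 0;	/* no attempts, report 0% rather than divide by zero */
	return (unsigned int)(((uint64_t)mpdus_failed * 100) / mpdus_tried);
}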
+
+struct ath12k_htt_tx_per_rate_stats_tlv {
+	__le32 rc_mode;
+	__le32 last_probed_mcs;
+	__le32 last_probed_nss;
+	__le32 last_probed_bw;
+	struct ath12k_htt_tx_rate_stats per_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS];
+	struct ath12k_htt_tx_rate_stats per_nss[ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS];
+	struct ath12k_htt_tx_rate_stats per_mcs[ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS];
+	struct ath12k_htt_tx_rate_stats per_bw320;
+	__le32 probe_cnt[ATH12K_HTT_RC_MODE_2D_COUNT];
+	__le32 ru_type;
+	struct ath12k_htt_tx_rate_stats ru[ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS];
+} __packed;
+
+#define ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS 16
+#define ATH12K_HTT_TX_PDEV_NUM_BE_BW_CNTRS 5
+#define ATH12K_HTT_TX_PDEV_NUM_EHT_SIG_MCS_CNTRS 4
+#define ATH12K_HTT_TX_PDEV_NUM_GI_CNTRS 4
+
+struct ath12k_htt_tx_pdev_rate_stats_be_ofdma_tlv {
+	__le32 mac_id__word;
+	__le32 be_ofdma_tx_ldpc;
+	__le32 be_ofdma_tx_mcs[ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS];
+	__le32 be_ofdma_tx_nss[ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS];
+	__le32 be_ofdma_tx_bw[ATH12K_HTT_TX_PDEV_NUM_BE_BW_CNTRS];
+	__le32 gi[ATH12K_HTT_TX_PDEV_NUM_GI_CNTRS][ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS];
+	__le32 be_ofdma_tx_ru_size[ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS];
+	__le32 be_ofdma_eht_sig_mcs[ATH12K_HTT_TX_PDEV_NUM_EHT_SIG_MCS_CNTRS];
+} __packed;
+
+struct ath12k_htt_pdev_mbssid_ctrl_frame_tlv {
+	__le32 mac_id__word;
+	__le32 basic_trigger_across_bss;
+	__le32 basic_trigger_within_bss;
+	__le32 bsr_trigger_across_bss;
+	__le32 bsr_trigger_within_bss;
+	__le32 mu_rts_across_bss;
+	__le32 mu_rts_within_bss;
+	__le32 ul_mumimo_trigger_across_bss;
+	__le32 ul_mumimo_trigger_within_bss;
+} __packed;
+
+#endif
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <crypto/hash.h>
@@ -14,6 +14,11 @@
 #include "peer.h"
 #include "dp_mon.h"
 
+enum ath12k_dp_desc_type {
+	ATH12K_DP_TX_DESC,
+	ATH12K_DP_RX_DESC,
+};
+
 static void ath12k_dp_htt_htc_tx_complete(struct ath12k_base *ab,
					   struct sk_buff *skb)
 {
@@ -36,8 +41,14 @@ void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr)
 		return;
 	}
 
+	if (!peer->primary_link) {
+		spin_unlock_bh(&ab->base_lock);
+		return;
+	}
+
 	ath12k_dp_rx_peer_tid_cleanup(ar, peer);
 	crypto_free_shash(peer->tfm_mmic);
+	peer->dp_setup_done = false;
 	spin_unlock_bh(&ab->base_lock);
 }
 
@@ -126,7 +137,9 @@ static int ath12k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
 static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
					      enum hal_ring_type type, int ring_num)
 {
+	const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
 	const u8 *grp_mask;
+	int i;
 
 	switch (type) {
 	case HAL_WBM2SW_RELEASE:
@@ -134,6 +147,14 @@ static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
 			grp_mask = &ab->hw_params->ring_mask->rx_wbm_rel[0];
 			ring_num = 0;
 		} else {
+			map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
+			for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
+				if (ring_num == map[i].wbm_ring_num) {
+					ring_num = i;
+					break;
+				}
+			}
+
 			grp_mask = &ab->hw_params->ring_mask->tx[0];
 		}
 		break;
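A sketch of the reverse lookup the hunk above adds: WBM2SW completion rings are identified by wbm_ring_num, while the tx interrupt-group masks are indexed by TCL ring, so the map is scanned to translate one into the other. Standalone illustration with a stand-in map type:

struct tcl_to_wbm_rbm_map {
	int wbm_ring_num;
};

static int wbm_to_tcl_index(const struct tcl_to_wbm_rbm_map *map,
			    int max_tx_ring, int wbm_ring_num)
{
	int i;

	/* linear scan is fine: max_tx_ring is at most 4 on these chips */
	for (i = 0; i < max_tx_ring; i++)
		if (map[i].wbm_ring_num == wbm_ring_num)
			return i;
	return -1;
}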
@@ -311,20 +332,22 @@ int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
 }
 
 static
-u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab, struct ath12k_vif *arvif)
+u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab,
+				      struct ath12k_link_vif *arvif)
 {
 	u32 bank_config = 0;
+	struct ath12k_vif *ahvif = arvif->ahvif;
 
 	/* Only valid for raw frames with HW crypto enabled.
	 * With SW crypto, mac80211 sets key per packet
	 */
-	if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
+	if (ahvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
	    test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
 		bank_config |=
-			u32_encode_bits(ath12k_dp_tx_get_encrypt_type(arvif->key_cipher),
+			u32_encode_bits(ath12k_dp_tx_get_encrypt_type(ahvif->key_cipher),
					HAL_TX_BANK_CONFIG_ENCRYPT_TYPE);
 
-	bank_config |= u32_encode_bits(arvif->tx_encap_type,
+	bank_config |= u32_encode_bits(ahvif->tx_encap_type,
				       HAL_TX_BANK_CONFIG_ENCAP_TYPE);
 	bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_SRC_BUFFER_SWAP) |
			u32_encode_bits(0, HAL_TX_BANK_CONFIG_LINK_META_SWAP) |
@@ -339,7 +362,7 @@ u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab, struct ath12k_vif
				       HAL_TX_ADDRY_EN),
			HAL_TX_BANK_CONFIG_ADDRY_EN);
-	bank_config |= u32_encode_bits(ieee80211_vif_is_mesh(arvif->vif) ? 3 : 0,
+	bank_config |= u32_encode_bits(ieee80211_vif_is_mesh(ahvif->vif) ? 3 : 0,
			u32_encode_bits(HAL_TX_BANK_CONFIG_MESH_EN) |
		       u32_encode_bits(arvif->vdev_id_check_en,
				       HAL_TX_BANK_CONFIG_VDEV_ID_CHECK_EN);
@@ -349,7 +372,8 @@ u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab, struct ath12k_vif
 
 	return bank_config;
 }
 
-static int ath12k_dp_tx_get_bank_profile(struct ath12k_base *ab, struct ath12k_vif *arvif,
+static int ath12k_dp_tx_get_bank_profile(struct ath12k_base *ab,
+					 struct ath12k_link_vif *arvif,
					 struct ath12k_dp *dp)
 {
 	int bank_id = DP_INVALID_BANK_ID;
@@ -451,8 +475,6 @@ static void ath12k_dp_srng_common_cleanup(struct ath12k_base *ab)
 		ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
 		ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
 	}
-	ath12k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
-	ath12k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
 	ath12k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
 }
 
@@ -473,20 +495,6 @@ static int ath12k_dp_srng_common_setup(struct ath12k_base *ab)
 		goto err;
 	}
 
-	ret = ath12k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
-				   DP_TCL_CMD_RING_SIZE);
-	if (ret) {
-		ath12k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
-		goto err;
-	}
-
-	ret = ath12k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
-				   0, 0, DP_TCL_STATUS_RING_SIZE);
-	if (ret) {
-		ath12k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
-		goto err;
-	}
-
 	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
 		map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
 		tx_comp_ring_num = map[i].wbm_ring_num;
@@ -610,6 +618,7 @@ static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
 	int i;
 	int ret = 0;
 	u32 end_offset, cookie;
+	enum hal_rx_buf_return_buf_manager rbm = dp->idle_link_rbm;
 
 	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
		ath12k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
@@ -640,7 +649,8 @@ static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
 		paddr = link_desc_banks[i].paddr;
 		while (n_entries) {
 			cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
-			ath12k_hal_set_link_desc_addr(scatter_buf, cookie, paddr);
+			ath12k_hal_set_link_desc_addr(scatter_buf, cookie,
+						      paddr, rbm);
 			n_entries--;
 			paddr += HAL_LINK_DESC_SIZE;
 			if (rem_entries) {
@@ -784,6 +794,7 @@ int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
 	u32 paddr;
 	int i, ret;
 	u32 cookie;
+	enum hal_rx_buf_return_buf_manager rbm = ab->dp.idle_link_rbm;
 
 	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
 	tot_mem_sz += HAL_LINK_DESC_ALIGN;
@@ -844,8 +855,7 @@ int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
 	while (n_entries &&
	       (desc = ath12k_hal_srng_src_get_next_entry(ab, srng))) {
 		cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
-		ath12k_hal_set_link_desc_addr(desc,
-					      cookie, paddr);
+		ath12k_hal_set_link_desc_addr(desc, cookie, paddr, rbm);
 		n_entries--;
 		paddr += HAL_LINK_DESC_SIZE;
 	}
@@ -875,11 +885,9 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
 	enum dp_monitor_mode monitor_mode;
 	u8 ring_mask;
 
-	while (i < ab->hw_params->max_tx_ring) {
-		if (ab->hw_params->ring_mask->tx[grp_id] &
-		    BIT(ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[i].wbm_ring_num))
-			ath12k_dp_tx_completion_handler(ab, i);
-		i++;
+	if (ab->hw_params->ring_mask->tx[grp_id]) {
+		i = fls(ab->hw_params->ring_mask->tx[grp_id]) - 1;
+		ath12k_dp_tx_completion_handler(ab, i);
 	}
 
 	if (ab->hw_params->ring_mask->rx_err[grp_id]) {
@@ -915,8 +923,8 @@
 		monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
 		ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
 		for (i = 0; i < ab->num_radios; i++) {
-			for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
-				int id = i * ab->hw_params->num_rxmda_per_pdev + j;
+			for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
+				int id = i * ab->hw_params->num_rxdma_per_pdev + j;
 
 				if (ring_mask & BIT(id)) {
 					work_done =
@@ -936,8 +944,8 @@
 		monitor_mode = ATH12K_DP_TX_MONITOR_MODE;
 		ring_mask = ab->hw_params->ring_mask->tx_mon_dest[grp_id];
 		for (i = 0; i < ab->num_radios; i++) {
-			for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
-				int id = i * ab->hw_params->num_rxmda_per_pdev + j;
+			for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
+				int id = i * ab->hw_params->num_rxdma_per_pdev + j;
 
 				if (ring_mask & BIT(id)) {
 					work_done =
@@ -959,10 +967,9 @@
 	if (ab->hw_params->ring_mask->host2rxdma[grp_id]) {
 		struct ath12k_dp *dp = &ab->dp;
 		struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+		LIST_HEAD(list);
 
-		ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, 0,
-					    ab->hw_params->hal_params->rx_buf_rbm,
-					    true);
+		ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);
 	}
 
 	/* TODO: Implement handler for other interrupts */
@@ -975,27 +982,46 @@ void ath12k_dp_pdev_free(struct ath12k_base *ab)
 {
 	int i;
 
+	if (!ab->mon_reap_timer.function)
+		return;
+
 	del_timer_sync(&ab->mon_reap_timer);
 
 	for (i = 0; i < ab->num_radios; i++)
 		ath12k_dp_rx_pdev_free(ab, i);
 }
 
-void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab)
+void ath12k_dp_pdev_pre_alloc(struct ath12k *ar)
 {
-	struct ath12k *ar;
-	struct ath12k_pdev_dp *dp;
-	int i;
+	struct ath12k_pdev_dp *dp = &ar->dp;
 
-	for (i = 0; i < ab->num_radios; i++) {
-		ar = ab->pdevs[i].ar;
-		dp = &ar->dp;
-		dp->mac_id = i;
-		atomic_set(&dp->num_tx_pending, 0);
-		init_waitqueue_head(&dp->tx_empty_waitq);
+	dp->mac_id = ar->pdev_idx;
+	atomic_set(&dp->num_tx_pending, 0);
+	init_waitqueue_head(&dp->tx_empty_waitq);
+	/* TODO: Add any RXDMA setup required per pdev */
+}
 
-		/* TODO: Add any RXDMA setup required per pdev */
+bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab)
+{
+	if (test_bit(WMI_TLV_SERVICE_WMSK_COMPACTION_RX_TLVS, ab->wmi_ab.svc_map) &&
+	    ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start &&
+	    ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end &&
+	    ab->hw_params->hal_ops->get_hal_rx_compact_ops) {
+		return true;
+	}
+	return false;
+}
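The dp_service_srng() change above replaces a per-ring scan with fls(mask) - 1, i.e. it services the tx-completion ring for the highest bit set in this group's mask, presumably because each interrupt group carries a single tx bit. A userspace equivalent of that selection using a builtin:

static int highest_set_ring(unsigned int tx_mask)
{
	if (!tx_mask)
		return -1;	/* no completion ring pending in this group */
	return 31 - __builtin_clz(tx_mask);	/* same result as fls(tx_mask) - 1 */
}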
+
+void ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab)
+{
+	if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
+		/* RX TLVS compaction is supported, hence change the hal_rx_ops
+		 * to compact hal_rx_ops.
+		 */
+		ab->hal_rx_ops = ab->hw_params->hal_ops->get_hal_rx_compact_ops();
+	}
+	ab->hal.hal_desc_sz =
+		ab->hal_rx_ops->rx_desc_get_desc_size();
+}
 
 static void ath12k_dp_service_mon_ring(struct timer_list *t)
@@ -1003,7 +1029,7 @@ static void ath12k_dp_service_mon_ring(struct timer_list *t)
 	struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer);
 	int i;
 
-	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)
+	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
 		ath12k_dp_mon_process_ring(ab, i, NULL, DP_MON_SERVICE_BUDGET,
					   ATH12K_DP_RX_MONITOR_MODE);
 
@@ -1077,9 +1103,9 @@ int ath12k_dp_htt_connect(struct ath12k_dp *dp)
 	return 0;
 }
 
-static void ath12k_dp_update_vdev_search(struct ath12k_vif *arvif)
+static void ath12k_dp_update_vdev_search(struct ath12k_link_vif *arvif)
 {
-	switch (arvif->vdev_type) {
+	switch (arvif->ahvif->vdev_type) {
 	case WMI_VDEV_TYPE_STA:
 		/* TODO: Verify the search type and flags since ast hash
		 * is not part of peer mapv3
@@ -1098,7 +1124,7 @@ static void ath12k_dp_update_vdev_search(struct ath12k_vif *arvif)
 	}
 }
 
-void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif)
+void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_link_vif *arvif)
 {
 	struct ath12k_base *ab = ar->ab;
 
@@ -1124,11 +1150,14 @@ void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif)
 
 static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
 {
-	struct ath12k_rx_desc_info *desc_info, *tmp;
+	struct ath12k_rx_desc_info *desc_info;
 	struct ath12k_tx_desc_info *tx_desc_info, *tmp1;
 	struct ath12k_dp *dp = &ab->dp;
+	struct ath12k_skb_cb *skb_cb;
 	struct sk_buff *skb;
-	int i;
+	struct ath12k *ar;
+	int i, j;
+	u32 pool_id, tx_spt_page;
 
 	if (!dp->spt_info)
 		return;
@@ -1136,16 +1165,31 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
 	/* RX Descriptor cleanup */
 	spin_lock_bh(&dp->rx_desc_lock);
 
-	list_for_each_entry_safe(desc_info, tmp, &dp->rx_desc_used_list, list) {
-		list_del(&desc_info->list);
-		skb = desc_info->skb;
+	for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
+		desc_info = dp->rxbaddr[i];
+
+		for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
+			if (!desc_info[j].in_use) {
+				list_del(&desc_info[j].list);
+				continue;
+			}
 
-		if (!skb)
+			skb = desc_info[j].skb;
+			if (!skb)
+				continue;
+
+			dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
					 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
+			dev_kfree_skb_any(skb);
+		}
+	}
+
+	for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
+		if (!dp->rxbaddr[i])
 			continue;
 
-		dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
-				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
-		dev_kfree_skb_any(skb);
+		kfree(dp->rxbaddr[i]);
+		dp->rxbaddr[i] = NULL;
 	}
 
 	spin_unlock_bh(&dp->rx_desc_lock);
@@ -1162,6 +1206,17 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
 		if (!skb)
 			continue;
 
+		/* if we are unregistering, hw would've been destroyed and
		 * ar is no longer valid.
		 */
+		if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags))) {
+			skb_cb = ATH12K_SKB_CB(skb);
+			ar = skb_cb->ar;
+
+			if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+				wake_up(&ar->dp.tx_empty_waitq);
+		}
+
 		dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
				 skb->len, DMA_TO_DEVICE);
 		dev_kfree_skb_any(skb);
@@ -1170,6 +1225,21 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
 		spin_unlock_bh(&dp->tx_desc_lock[i]);
 	}
 
+	for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
+		spin_lock_bh(&dp->tx_desc_lock[pool_id]);
+
+		for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
+			tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
+			if (!dp->txbaddr[tx_spt_page])
+				continue;
+
+			kfree(dp->txbaddr[tx_spt_page]);
+			dp->txbaddr[tx_spt_page] = NULL;
+		}
+
+		spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
+	}
+
 	/* unmap SPT pages */
 	for (i = 0; i < dp->num_spt_pages; i++) {
 		if (!dp->spt_info[i].vaddr)
@@ -1181,6 +1251,7 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
 	}
 
 	kfree(dp->spt_info);
+	dp->spt_info = NULL;
 }
 
 static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
@@ -1190,15 +1261,23 @@ static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
 	if (!ab->hw_params->reoq_lut_support)
 		return;
 
-	if (!dp->reoq_lut.vaddr)
-		return;
-
-	dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
-			  dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
-	dp->reoq_lut.vaddr = NULL;
+	if (dp->reoq_lut.vaddr) {
+		ath12k_hif_write32(ab,
				   HAL_SEQ_WCSS_UMAC_REO_REG +
				   HAL_REO1_QDESC_LUT_BASE0(ab), 0);
+		dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
				  dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
+		dp->reoq_lut.vaddr = NULL;
+	}
 
-	ath12k_hif_write32(ab,
-			   HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab), 0);
+	if (dp->ml_reoq_lut.vaddr) {
+		ath12k_hif_write32(ab,
				   HAL_SEQ_WCSS_UMAC_REO_REG +
				   HAL_REO1_QDESC_LUT_BASE1(ab), 0);
+		dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
				  dp->ml_reoq_lut.vaddr, dp->ml_reoq_lut.paddr);
+		dp->ml_reoq_lut.vaddr = NULL;
+	}
 }
 
 void ath12k_dp_free(struct ath12k_base *ab)
@@ -1206,6 +1285,9 @@ void ath12k_dp_free(struct ath12k_base *ab)
 	struct ath12k_dp *dp = &ab->dp;
 	int i;
 
+	if (!dp->ab)
+		return;
+
 	ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
 
@@ -1216,11 +1298,14 @@ void ath12k_dp_free(struct ath12k_base *ab)
 
 	ath12k_dp_rx_reo_cmd_list_cleanup(ab);
 
-	for (i = 0; i < ab->hw_params->max_tx_ring; i++)
+	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
 		kfree(dp->tx_ring[i].tx_status);
+		dp->tx_ring[i].tx_status = NULL;
+	}
 
 	ath12k_dp_rx_free(ab);
 	/* Deinit any SOC level resource */
+	dp->ab = NULL;
 }
 
 void ath12k_dp_cc_config(struct ath12k_base *ab)
@@ -1289,16 +1374,22 @@ static inline void *ath12k_dp_cc_get_desc_addr_ptr(struct ath12k_base *ab,
 struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
						  u32 cookie)
 {
+	struct ath12k_dp *dp = &ab->dp;
 	struct ath12k_rx_desc_info **desc_addr_ptr;
-	u16 ppt_idx, spt_idx;
+	u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;
 
 	ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
-	spt_idx = u32_get_bits(cookie, ATH12k_DP_CC_COOKIE_SPT);
+	spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);
+
+	start_ppt_idx = dp->rx_ppt_base + ATH12K_RX_SPT_PAGE_OFFSET;
+	end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES;
 
-	if (ppt_idx > ATH12K_NUM_RX_SPT_PAGES ||
+	if (ppt_idx < start_ppt_idx ||
+	    ppt_idx >= end_ppt_idx ||
	    spt_idx > ATH12K_MAX_SPT_ENTRIES)
 		return NULL;
 
+	ppt_idx = ppt_idx - dp->rx_ppt_base;
 	desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab,
						       ppt_idx, spt_idx);
 
 	return *desc_addr_ptr;
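The cookie layout the hunk above validates: SPT index in bits 8:0, PPT index in bits 19:9, with rx PPT indices offset by the per-device rx_ppt_base. A userspace sketch mirroring that range check:

#include <stdint.h>
#include <stdbool.h>

#define DP_CC_COOKIE_SPT(c)	((c) & 0x1ff)		/* GENMASK(8, 0) */
#define DP_CC_COOKIE_PPT(c)	(((c) >> 9) & 0x7ff)	/* GENMASK(19, 9) */

static bool rx_cookie_in_range(uint32_t cookie, uint16_t rx_ppt_base,
			       uint16_t rx_page_offset, uint16_t num_rx_pages,
			       uint16_t max_spt_entries)
{
	uint16_t ppt = DP_CC_COOKIE_PPT(cookie);
	uint16_t spt = DP_CC_COOKIE_SPT(cookie);
	uint16_t start = rx_ppt_base + rx_page_offset;

	/* a cookie from another device's PPT window is rejected here */
	return ppt >= start && ppt < start + num_rx_pages &&
	       spt <= max_spt_entries;
}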
@@ -1308,13 +1399,17 @@ struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
						  u32 cookie)
 {
 	struct ath12k_tx_desc_info **desc_addr_ptr;
-	u16 ppt_idx, spt_idx;
+	u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;
 
 	ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
-	spt_idx = u32_get_bits(cookie, ATH12k_DP_CC_COOKIE_SPT);
+	spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);
 
-	if (ppt_idx < ATH12K_NUM_RX_SPT_PAGES ||
-	    ppt_idx > ab->dp.num_spt_pages ||
+	start_ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET;
+	end_ppt_idx = start_ppt_idx +
+		      (ATH12K_TX_SPT_PAGES_PER_POOL * ATH12K_HW_MAX_QUEUES);
+
+	if (ppt_idx < start_ppt_idx ||
+	    ppt_idx >= end_ppt_idx ||
	    spt_idx > ATH12K_MAX_SPT_ENTRIES)
 		return NULL;
 
@@ -1329,7 +1424,7 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
 	struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr;
 	struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr;
 	u32 i, j, pool_id, tx_spt_page;
-	u32 ppt_idx;
+	u32 ppt_idx, cookie_ppt_idx;
 
 	spin_lock_bh(&dp->rx_desc_lock);
 
@@ -1343,13 +1438,18 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
 			return -ENOMEM;
 		}
 
+		ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET + i;
+		cookie_ppt_idx = dp->rx_ppt_base + ppt_idx;
+		dp->rxbaddr[i] = &rx_descs[0];
+
 		for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
-			rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(i, j);
+			rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(cookie_ppt_idx, j);
 			rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
+			rx_descs[j].device_id = ab->device_id;
 			list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);
 
 			/* Update descriptor VA in SPT */
-			rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(ab, i, j);
+			rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j);
 			*rx_desc_addr = &rx_descs[j];
 		}
 	}
@@ -1368,9 +1468,12 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
 			return -ENOMEM;
 		}
 
+		tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
+		ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET + tx_spt_page;
+
+		dp->txbaddr[tx_spt_page] = &tx_descs[0];
+
 		for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
-			tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
-			ppt_idx = ATH12K_NUM_RX_SPT_PAGES + tx_spt_page;
 			tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j);
 			tx_descs[j].pool_id = pool_id;
 			list_add_tail(&tx_descs[j].list,
@@ -1387,14 +1490,57 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
 	return 0;
 }
 
+static int ath12k_dp_cmem_init(struct ath12k_base *ab,
+			       struct ath12k_dp *dp,
+			       enum ath12k_dp_desc_type type)
+{
+	u32 cmem_base;
+	int i, start, end;
+
+	cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
+
+	switch (type) {
+	case ATH12K_DP_TX_DESC:
+		start = ATH12K_TX_SPT_PAGE_OFFSET;
+		end = start + ATH12K_NUM_TX_SPT_PAGES;
+		break;
+	case ATH12K_DP_RX_DESC:
+		cmem_base += ATH12K_PPT_ADDR_OFFSET(dp->rx_ppt_base);
+		start = ATH12K_RX_SPT_PAGE_OFFSET;
+		end = start + ATH12K_NUM_RX_SPT_PAGES;
+		break;
+	default:
+		ath12k_err(ab, "invalid descriptor type %d in cmem init\n", type);
+		return -EINVAL;
+	}
+
+	/* Write to PPT in CMEM */
+	for (i = start; i < end; i++)
+		ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
+				   dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);
+
+	return 0;
+}
+
+void ath12k_dp_partner_cc_init(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	int i;
+
+	for (i = 0; i < ag->num_devices; i++) {
+		if (ag->ab[i] == ab)
+			continue;
+
+		ath12k_dp_cmem_init(ab, &ag->ab[i]->dp, ATH12K_DP_RX_DESC);
+	}
+}
+
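A sketch of the CMEM programming in ath12k_dp_cmem_init() above: each PPT slot receives the physical address of one SPT page shifted down by the 4 KiB alignment. The slot width and shift value are assumptions here, since ATH12K_PPT_ADDR_OFFSET and ATH12K_SPT_4K_ALIGN_OFFSET are defined outside this patch excerpt.

#include <stdint.h>

#define SPT_4K_ALIGN_SHIFT 12		/* assumed value of ATH12K_SPT_4K_ALIGN_OFFSET */
#define PPT_ADDR_OFFSET(i) ((i) * 4)	/* assumed: one 32-bit slot per SPT page */

static uint32_t cmem_ppt_word(uint64_t spt_paddr)
{
	/* the page is 4 KiB aligned, so the low 12 bits carry no information */
	return (uint32_t)(spt_paddr >> SPT_4K_ALIGN_SHIFT);
}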
 static int ath12k_dp_cc_init(struct ath12k_base *ab)
 {
 	struct ath12k_dp *dp = &ab->dp;
 	int i, ret = 0;
-	u32 cmem_base;
 
 	INIT_LIST_HEAD(&dp->rx_desc_free_list);
-	INIT_LIST_HEAD(&dp->rx_desc_used_list);
 	spin_lock_init(&dp->rx_desc_lock);
 
 	for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
@@ -1415,7 +1561,7 @@ static int ath12k_dp_cc_init(struct ath12k_base *ab)
 		return -ENOMEM;
 	}
 
-	cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
+	dp->rx_ppt_base = ab->device_id * ATH12K_NUM_RX_SPT_PAGES;
 
 	for (i = 0; i < dp->num_spt_pages; i++) {
 		dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev,
@@ -1433,10 +1579,18 @@ static int ath12k_dp_cc_init(struct ath12k_base *ab)
 			ret = -EINVAL;
 			goto free;
 		}
+	}
 
-		/* Write to PPT in CMEM */
-		ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
-				   dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);
+	ret = ath12k_dp_cmem_init(ab, dp, ATH12K_DP_TX_DESC);
+	if (ret) {
+		ath12k_warn(ab, "HW CC Tx cmem init failed %d", ret);
+		goto free;
+	}
+
+	ret = ath12k_dp_cmem_init(ab, dp, ATH12K_DP_RX_DESC);
+	if (ret) {
+		ath12k_warn(ab, "HW CC Rx cmem init failed %d", ret);
+		goto free;
 	}
 
 	ret = ath12k_dp_cc_desc_init(ab);
@@ -1467,11 +1621,44 @@ static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
 		return -ENOMEM;
 	}
 
+	dp->ml_reoq_lut.vaddr = dma_alloc_coherent(ab->dev,
						   DP_REOQ_LUT_SIZE,
						   &dp->ml_reoq_lut.paddr,
						   GFP_KERNEL | __GFP_ZERO);
+	if (!dp->ml_reoq_lut.vaddr) {
+		ath12k_warn(ab, "failed to allocate memory for ML reoq table");
+		dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
				  dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
+		dp->reoq_lut.vaddr = NULL;
+		return -ENOMEM;
+	}
+
 	ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab),
			   dp->reoq_lut.paddr);
+	ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE1(ab),
			   dp->ml_reoq_lut.paddr >> 8);
+
 	return 0;
 }
 
+static enum hal_rx_buf_return_buf_manager
+ath12k_dp_get_idle_link_rbm(struct ath12k_base *ab)
+{
+	switch (ab->device_id) {
+	case 0:
+		return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST;
+	case 1:
+		return HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST;
+	case 2:
+		return HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST;
+	default:
+		ath12k_warn(ab, "invalid %d device id, so choose default rbm\n",
			    ab->device_id);
+		WARN_ON(1);
+		return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST;
+	}
+}
+
 int ath12k_dp_alloc(struct ath12k_base *ab)
 {
 	struct ath12k_dp *dp = &ab->dp;
@@ -1488,6 +1675,7 @@ int ath12k_dp_alloc(struct ath12k_base *ab)
 	spin_lock_init(&dp->reo_cmd_lock);
 
 	dp->reo_cmd_cache_flush_count = 0;
+	dp->idle_link_rbm = ath12k_dp_get_idle_link_rbm(ab);
 
 	ret = ath12k_wbm_idle_ring_setup(ab, &n_link_desc);
 	if (ret) {
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */
 
 #ifndef ATH12K_DP_H
@@ -16,6 +16,7 @@ struct ath12k_base;
 struct ath12k_peer;
 struct ath12k_dp;
 struct ath12k_vif;
+struct ath12k_link_vif;
 struct hal_tcl_status_ring;
 struct ath12k_ext_irq_grp;
 
@@ -31,7 +32,7 @@ struct dp_srng {
 	u32 ring_id;
 };
 
-struct dp_rxdma_ring {
+struct dp_rxdma_mon_ring {
 	struct dp_srng refill_buf_ring;
 	struct idr bufs_idr;
 	/* Protects bufs_idr */
@@ -39,6 +40,11 @@ struct dp_rxdma_ring {
 	int bufs_max;
 };
 
+struct dp_rxdma_ring {
+	struct dp_srng refill_buf_ring;
+	int bufs_max;
+};
+
 #define ATH12K_TX_COMPL_NEXT(x)	(((x) + 1) % DP_TX_COMP_RING_SIZE)
 
 struct dp_tx_ring {
@@ -145,7 +151,7 @@ struct ath12k_pdev_dp {
 
 #define DP_RX_HASH_ENABLE 1 /* Enable hash based Rx steering */
 
-#define DP_BA_WIN_SZ_MAX	256
+#define DP_BA_WIN_SZ_MAX	1024
 
 #define DP_TCL_NUM_RING_MAX	4
 
@@ -165,6 +171,7 @@ struct ath12k_pdev_dp {
 #define DP_REO_CMD_RING_SIZE		128
 #define DP_REO_STATUS_RING_SIZE		2048
 #define DP_RXDMA_BUF_RING_SIZE		4096
+#define DP_RX_MAC_BUF_RING_SIZE		2048
 #define DP_RXDMA_REFILL_RING_SIZE	2048
 #define DP_RXDMA_ERR_DST_RING_SIZE	1024
 #define DP_RXDMA_MON_STATUS_RING_SIZE	1024
@@ -217,6 +224,9 @@ struct ath12k_pdev_dp {
 #define ATH12K_NUM_TX_SPT_PAGES	(ATH12K_TX_SPT_PAGES_PER_POOL * ATH12K_HW_MAX_QUEUES)
 #define ATH12K_NUM_SPT_PAGES	(ATH12K_NUM_RX_SPT_PAGES + ATH12K_NUM_TX_SPT_PAGES)
 
+#define ATH12K_TX_SPT_PAGE_OFFSET 0
+#define ATH12K_RX_SPT_PAGE_OFFSET ATH12K_NUM_TX_SPT_PAGES
+
 /* The SPT pages are divided for RX and TX, first block for RX
 * and remaining for TX
 */
@@ -239,7 +249,7 @@ struct ath12k_pdev_dp {
 #define ATH12K_CC_SPT_MSB	8
 #define ATH12K_CC_PPT_MSB	19
 #define ATH12K_CC_PPT_SHIFT	9
-#define ATH12k_DP_CC_COOKIE_SPT	GENMASK(8, 0)
+#define ATH12K_DP_CC_COOKIE_SPT	GENMASK(8, 0)
 #define ATH12K_DP_CC_COOKIE_PPT	GENMASK(19, 9)
 
 #define DP_REO_QREF_NUM		GENMASK(31, 16)
@@ -276,6 +286,9 @@ struct ath12k_rx_desc_info {
 	struct sk_buff *skb;
 	u32 cookie;
 	u32 magic;
+	u8 in_use : 1,
+	   device_id : 3,
+	   reserved : 4;
 };
 
 struct ath12k_tx_desc_info {
@@ -312,15 +325,15 @@ struct ath12k_dp {
 	u8 htt_tgt_ver_major;
 	u8 htt_tgt_ver_minor;
 	struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
+	enum hal_rx_buf_return_buf_manager idle_link_rbm;
 	struct dp_srng wbm_idle_ring;
 	struct dp_srng wbm_desc_rel_ring;
-	struct dp_srng tcl_cmd_ring;
-	struct dp_srng tcl_status_ring;
 	struct dp_srng reo_reinject_ring;
 	struct dp_srng rx_rel_ring;
 	struct dp_srng reo_except_ring;
 	struct dp_srng reo_cmd_ring;
 	struct dp_srng reo_status_ring;
+	enum ath12k_peer_metadata_version peer_metadata_ver;
 	struct dp_srng reo_dst_ring[DP_REO_DST_RING_MAX];
 	struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
 	struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
@@ -338,9 +351,11 @@ struct ath12k_dp {
 	struct ath12k_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX];
 	struct ath12k_spt_info *spt_info;
 	u32 num_spt_pages;
+	u32 rx_ppt_base;
+	struct ath12k_rx_desc_info *rxbaddr[ATH12K_NUM_RX_SPT_PAGES];
+	struct ath12k_tx_desc_info *txbaddr[ATH12K_NUM_TX_SPT_PAGES];
 	struct list_head rx_desc_free_list;
-	struct list_head rx_desc_used_list;
-	/* protects the free and used desc list */
+	/* protects the free desc list */
 	spinlock_t rx_desc_lock;
 
 	struct list_head tx_desc_free_list[ATH12K_HW_MAX_QUEUES];
@@ -351,9 +366,10 @@ struct ath12k_dp {
 	struct dp_rxdma_ring rx_refill_buf_ring;
 	struct dp_srng rx_mac_buf_ring[MAX_RXDMA_PER_PDEV];
 	struct dp_srng rxdma_err_dst_ring[MAX_RXDMA_PER_PDEV];
-	struct dp_rxdma_ring rxdma_mon_buf_ring;
-	struct dp_rxdma_ring tx_mon_buf_ring;
+	struct dp_rxdma_mon_ring rxdma_mon_buf_ring;
+	struct dp_rxdma_mon_ring tx_mon_buf_ring;
 	struct ath12k_reo_q_addr_lut reoq_lut;
+	struct ath12k_reo_q_addr_lut ml_reoq_lut;
 };
 
 /* HTT definitions */
@@ -369,8 +385,6 @@ struct ath12k_dp {
 /* peer meta data */
 #define HTT_TCL_META_DATA_PEER_ID	GENMASK(15, 2)
 
-#define HTT_TX_WBM_COMP_STATUS_OFFSET 8
-
 /* HTT tx completion is overlaid in wbm_release_ring */
 #define HTT_TX_WBM_COMP_INFO0_STATUS		GENMASK(16, 13)
 #define HTT_TX_WBM_COMP_INFO1_REINJECT_REASON	GENMASK(3, 0)
@@ -682,9 +696,9 @@ enum htt_stats_internal_ppdu_frametype {
 *
 * The message would appear as follows:
 *
- * |31 26|25|24|23      16|15      8|7       0|
- * |-----------------+----------------+----------------+---------------|
- * |      rsvd1      |PS|SS| ring_id | pdev_id | msg_type |
+ * |31 29|28|27|26|25|24|23      16|15      8|7       0|
+ * |-------+--+--+--+--+--+-----------+----------------+---------------|
+ * | rsvd1 |ED|DT|OV|PS|SS|  ring_id  | pdev_id  |    msg_type   |
 * |-------------------------------------------------------------------|
 * |                  rsvd2           |        ring_buffer_size        |
 * |-------------------------------------------------------------------|
@@ -711,8 +725,14 @@ enum htt_stats_internal_ppdu_frametype {
 *          More details can be got from enum htt_srng_ring_id
 *          b'24    - status_swap: 1 is to swap status TLV
 *          b'25    - pkt_swap:  1 is to swap packet TLV
- *          b'26:31 - rsvd1:  reserved for future use
- * dword1 - b'0:16  - ring_buffer_size: size of bufferes referenced by rx ring,
+ *          b'26    - rx_offset_valid (OV): flag to indicate rx offsets
+ *                    configuration fields are valid
+ *          b'27    - drop_thresh_valid (DT): flag to indicate if the
+ *                    rx_drop_threshold field is valid
+ *          b'28    - rx_mon_global_en: Enable/Disable global register
+ *                    configuration in Rx monitor module.
+ *          b'29:31 - rsvd1:  reserved for future use
+ * dword1 - b'0:16  - ring_buffer_size: size of buffers referenced by rx ring,
 *                    in byte units.
 *                    Valid only for HW_TO_SW_RING and SW_TO_HW_RING
 *        - b'16:31 - rsvd2: Reserved for future use
@@ -758,6 +778,11 @@ enum htt_stats_internal_ppdu_frametype {
 #define HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET	GENMASK(31, 16)
 #define HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET	GENMASK(15, 0)
 
+#define HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET	BIT(23)
+#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK	GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK	GENMASK(18, 16)
+#define HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK	GENMASK(16, 0)
+
 enum htt_rx_filter_tlv_flags {
 	HTT_RX_FILTER_TLV_FLAGS_MPDU_START	= BIT(0),
 	HTT_RX_FILTER_TLV_FLAGS_MSDU_START	= BIT(1),
@@ -1081,6 +1106,11 @@ struct htt_rx_ring_selection_cfg_cmd {
 	__le32 rx_mpdu_offset;
 	__le32 rx_msdu_offset;
 	__le32 rx_attn_offset;
+	__le32 info2;
+	__le32 reserved[2];
+	__le32 rx_mpdu_start_end_mask;
+	__le32 rx_msdu_end_word_mask;
+	__le32 info3;
 } __packed;
 
 struct htt_rx_ring_tlv_filter {
@@ -1097,6 +1127,9 @@ struct htt_rx_ring_tlv_filter {
 	u16 rx_msdu_end_offset;
 	u16 rx_msdu_start_offset;
 	u16 rx_attn_offset;
+	u16 rx_mpdu_start_wmask;
+	u16 rx_mpdu_end_wmask;
+	u32 rx_msdu_end_wmask;
 };
 
 #define HTT_STATS_FRAME_CTRL_TYPE_MGMT	0x0
@@ -1471,18 +1504,6 @@ struct htt_ppdu_stats_user_rate {
 #define HTT_TX_INFO_PEERID(_flags) \
	u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M)
 
-struct htt_tx_ppdu_stats_info {
-	struct htt_tlv tlv_hdr;
-	__le32 tx_success_bytes;
-	__le32 tx_retry_bytes;
-	__le32 tx_failed_bytes;
-	__le32 flags; /* %HTT_PPDU_STATS_TX_INFO_FLAGS_ */
-	__le16 tx_success_msdus;
-	__le16 tx_retry_msdus;
-	__le16 tx_failed_msdus;
-	__le16 tx_duration; /* united in us */
-} __packed;
-
 enum htt_ppdu_stats_usr_compln_status {
 	HTT_PPDU_STATS_USER_STATUS_OK,
 	HTT_PPDU_STATS_USER_STATUS_FILTERED,
@@ -1777,6 +1798,18 @@ enum vdev_stats_offload_timer_duration {
 	ATH12K_STATS_TIMER_DUR_2SEC = 3,
 };
 
+#define ATH12K_HTT_MAC_ADDR_L32_0	GENMASK(7, 0)
+#define ATH12K_HTT_MAC_ADDR_L32_1	GENMASK(15, 8)
+#define ATH12K_HTT_MAC_ADDR_L32_2	GENMASK(23, 16)
+#define ATH12K_HTT_MAC_ADDR_L32_3	GENMASK(31, 24)
+#define ATH12K_HTT_MAC_ADDR_H16_0	GENMASK(7, 0)
+#define ATH12K_HTT_MAC_ADDR_H16_1	GENMASK(15, 8)
+
+struct htt_mac_addr {
+	__le32 mac_addr_l32;
+	__le32 mac_addr_h16;
+} __packed;
+
 static inline void ath12k_dp_get_mac_addr(u32 addr_l32, u16 addr_h16, u8 *addr)
 {
 	memcpy(addr, &addr_l32, 4);
ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab); #endif @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include "dp_mon.h" @@ -10,12 +10,10 @@ #include "dp_tx.h" #include "peer.h" -static void ath12k_dp_mon_rx_handle_ofdma_info(void *rx_tlv, - struct hal_rx_user_status *rx_user_status) +static void +ath12k_dp_mon_rx_handle_ofdma_info(const struct hal_rx_ppdu_end_user_stats *ppdu_end_user, + struct hal_rx_user_status *rx_user_status) { - struct hal_rx_ppdu_end_user_stats *ppdu_end_user = - (struct hal_rx_ppdu_end_user_stats *)rx_tlv; - rx_user_status->ul_ofdma_user_v0_word0 = __le32_to_cpu(ppdu_end_user->usr_resp_ref); rx_user_status->ul_ofdma_user_v0_word1 = @@ -23,24 +21,20 @@ static void ath12k_dp_mon_rx_handle_ofdma_info(void *rx_tlv, } static void -ath12k_dp_mon_rx_populate_byte_count(void *rx_tlv, void *ppduinfo, +ath12k_dp_mon_rx_populate_byte_count(const struct hal_rx_ppdu_end_user_stats *stats, + void *ppduinfo, struct hal_rx_user_status *rx_user_status) { - struct hal_rx_ppdu_end_user_stats *ppdu_end_user = - (struct hal_rx_ppdu_end_user_stats *)rx_tlv; - u32 mpdu_ok_byte_count = __le32_to_cpu(ppdu_end_user->mpdu_ok_cnt); - u32 mpdu_err_byte_count = __le32_to_cpu(ppdu_end_user->mpdu_err_cnt); - rx_user_status->mpdu_ok_byte_count = - u32_get_bits(mpdu_ok_byte_count, - HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_OK_BYTE_COUNT); + le32_get_bits(stats->info7, + HAL_RX_PPDU_END_USER_STATS_INFO7_MPDU_OK_BYTE_COUNT); rx_user_status->mpdu_err_byte_count = - u32_get_bits(mpdu_err_byte_count, - HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_ERR_BYTE_COUNT); + le32_get_bits(stats->info8, + HAL_RX_PPDU_END_USER_STATS_INFO8_MPDU_ERR_BYTE_COUNT); } static void -ath12k_dp_mon_rx_populate_mu_user_info(void *rx_tlv, +ath12k_dp_mon_rx_populate_mu_user_info(const struct hal_rx_ppdu_end_user_stats *rx_tlv, struct hal_rx_mon_ppdu_info *ppdu_info, struct hal_rx_user_status *rx_user_status) { @@ -78,11 +72,9 @@ ath12k_dp_mon_rx_populate_mu_user_info(void *rx_tlv, ath12k_dp_mon_rx_populate_byte_count(rx_tlv, ppdu_info, rx_user_status); } -static void ath12k_dp_mon_parse_vht_sig_a(u8 *tlv_data, +static void ath12k_dp_mon_parse_vht_sig_a(const struct hal_rx_vht_sig_a_info *vht_sig, struct hal_rx_mon_ppdu_info *ppdu_info) { - struct hal_rx_vht_sig_a_info *vht_sig = - (struct hal_rx_vht_sig_a_info *)tlv_data; u32 nsts, group_id, info0, info1; u8 gi_setting; @@ -124,11 +116,9 @@ static void ath12k_dp_mon_parse_vht_sig_a(u8 *tlv_data, u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING); } -static void ath12k_dp_mon_parse_ht_sig(u8 *tlv_data, +static void ath12k_dp_mon_parse_ht_sig(const struct hal_rx_ht_sig_info *ht_sig, struct hal_rx_mon_ppdu_info *ppdu_info) { - struct hal_rx_ht_sig_info *ht_sig = - (struct hal_rx_ht_sig_info *)tlv_data; u32 info0 = __le32_to_cpu(ht_sig->info0); u32 info1 = __le32_to_cpu(ht_sig->info1); @@ -141,11 +131,9 @@ static void ath12k_dp_mon_parse_ht_sig(u8 *tlv_data, ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; } -static void ath12k_dp_mon_parse_l_sig_b(u8 *tlv_data, +static void ath12k_dp_mon_parse_l_sig_b(const struct hal_rx_lsig_b_info *lsigb, struct hal_rx_mon_ppdu_info *ppdu_info) { - struct hal_rx_lsig_b_info *lsigb = - (struct hal_rx_lsig_b_info *)tlv_data; u32 info0 = __le32_to_cpu(lsigb->info0); u8 rate; @@ -175,11 
+163,9 @@ static void ath12k_dp_mon_parse_l_sig_b(u8 *tlv_data, ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; } -static void ath12k_dp_mon_parse_l_sig_a(u8 *tlv_data, +static void ath12k_dp_mon_parse_l_sig_a(const struct hal_rx_lsig_a_info *lsiga, struct hal_rx_mon_ppdu_info *ppdu_info) { - struct hal_rx_lsig_a_info *lsiga = - (struct hal_rx_lsig_a_info *)tlv_data; u32 info0 = __le32_to_cpu(lsiga->info0); u8 rate; @@ -217,14 +203,13 @@ static void ath12k_dp_mon_parse_l_sig_a(u8 *tlv_data, ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; } -static void ath12k_dp_mon_parse_he_sig_b2_ofdma(u8 *tlv_data, - struct hal_rx_mon_ppdu_info *ppdu_info) +static void +ath12k_dp_mon_parse_he_sig_b2_ofdma(const struct hal_rx_he_sig_b2_ofdma_info *ofdma, + struct hal_rx_mon_ppdu_info *ppdu_info) { - struct hal_rx_he_sig_b2_ofdma_info *he_sig_b2_ofdma = - (struct hal_rx_he_sig_b2_ofdma_info *)tlv_data; u32 info0, value; - info0 = __le32_to_cpu(he_sig_b2_ofdma->info0); + info0 = __le32_to_cpu(ofdma->info0); ppdu_info->he_data1 |= HE_MCS_KNOWN | HE_DCM_KNOWN | HE_CODING_KNOWN; @@ -255,11 +240,10 @@ static void ath12k_dp_mon_parse_he_sig_b2_ofdma(u8 *tlv_data, ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA; } -static void ath12k_dp_mon_parse_he_sig_b2_mu(u8 *tlv_data, - struct hal_rx_mon_ppdu_info *ppdu_info) +static void +ath12k_dp_mon_parse_he_sig_b2_mu(const struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu, + struct hal_rx_mon_ppdu_info *ppdu_info) { - struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu = - (struct hal_rx_he_sig_b2_mu_info *)tlv_data; u32 info0, value; info0 = __le32_to_cpu(he_sig_b2_mu->info0); @@ -282,11 +266,10 @@ static void ath12k_dp_mon_parse_he_sig_b2_mu(u8 *tlv_data, ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS); } -static void ath12k_dp_mon_parse_he_sig_b1_mu(u8 *tlv_data, - struct hal_rx_mon_ppdu_info *ppdu_info) +static void +ath12k_dp_mon_parse_he_sig_b1_mu(const struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu, + struct hal_rx_mon_ppdu_info *ppdu_info) { - struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu = - (struct hal_rx_he_sig_b1_mu_info *)tlv_data; u32 info0 = __le32_to_cpu(he_sig_b1_mu->info0); u16 ru_tones; @@ -297,11 +280,10 @@ static void ath12k_dp_mon_parse_he_sig_b1_mu(u8 *tlv_data, ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO; } -static void ath12k_dp_mon_parse_he_sig_mu(u8 *tlv_data, - struct hal_rx_mon_ppdu_info *ppdu_info) +static void +ath12k_dp_mon_parse_he_sig_mu(const struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl, + struct hal_rx_mon_ppdu_info *ppdu_info) { - struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl = - (struct hal_rx_he_sig_a_mu_dl_info *)tlv_data; u32 info0, info1, value; u16 he_gi = 0, he_ltf = 0; @@ -432,11 +414,9 @@ static void ath12k_dp_mon_parse_he_sig_mu(u8 *tlv_data, ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO; } -static void ath12k_dp_mon_parse_he_sig_su(u8 *tlv_data, +static void ath12k_dp_mon_parse_he_sig_su(const struct hal_rx_he_sig_a_su_info *he_sig_a, struct hal_rx_mon_ppdu_info *ppdu_info) { - struct hal_rx_he_sig_a_su_info *he_sig_a = - (struct hal_rx_he_sig_a_su_info *)tlv_data; u32 info0, info1, value; u32 dcm; u8 he_dcm = 0, he_stbc = 0; @@ -585,22 +565,30 @@ static void ath12k_dp_mon_parse_he_sig_su(u8 *tlv_data, static enum hal_rx_mon_status ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, struct ath12k_mon_data *pmon, - u32 tlv_tag, u8 *tlv_data, u32 userid) + u32 tlv_tag, const void *tlv_data, + u32 userid) { struct hal_rx_mon_ppdu_info 
*ppdu_info = &pmon->mon_ppdu_info; u32 info[7]; switch (tlv_tag) { case HAL_RX_PPDU_START: { - struct hal_rx_ppdu_start *ppdu_start = - (struct hal_rx_ppdu_start *)tlv_data; + const struct hal_rx_ppdu_start *ppdu_start = tlv_data; + + u64 ppdu_ts = ath12k_le32hilo_to_u64(ppdu_start->ppdu_start_ts_63_32, + ppdu_start->ppdu_start_ts_31_0); info[0] = __le32_to_cpu(ppdu_start->info0); - ppdu_info->ppdu_id = - u32_get_bits(info[0], HAL_RX_PPDU_START_INFO0_PPDU_ID); - ppdu_info->chan_num = __le32_to_cpu(ppdu_start->chan_num); - ppdu_info->ppdu_ts = __le32_to_cpu(ppdu_start->ppdu_start_ts); + ppdu_info->ppdu_id = u32_get_bits(info[0], + HAL_RX_PPDU_START_INFO0_PPDU_ID); + + info[1] = __le32_to_cpu(ppdu_start->info1); + ppdu_info->chan_num = u32_get_bits(info[1], + HAL_RX_PPDU_START_INFO1_CHAN_NUM); + ppdu_info->freq = u32_get_bits(info[1], + HAL_RX_PPDU_START_INFO1_CHAN_FREQ); + ppdu_info->ppdu_ts = ppdu_ts; if (ppdu_info->ppdu_id != ppdu_info->last_ppdu_id) { ppdu_info->last_ppdu_id = ppdu_info->ppdu_id; @@ -612,8 +600,8 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, break; } case HAL_RX_PPDU_END_USER_STATS: { - struct hal_rx_ppdu_end_user_stats *eu_stats = - (struct hal_rx_ppdu_end_user_stats *)tlv_data; + const struct hal_rx_ppdu_end_user_stats *eu_stats = tlv_data; + u32 tid_bitmap; info[0] = __le32_to_cpu(eu_stats->info0); info[1] = __le32_to_cpu(eu_stats->info1); @@ -626,10 +614,9 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, u32_get_bits(info[2], HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX); ppdu_info->fc_valid = u32_get_bits(info[1], HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID); - ppdu_info->tid = - ffs(u32_get_bits(info[6], - HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP) - - 1); + tid_bitmap = u32_get_bits(info[6], + HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP); + ppdu_info->tid = ffs(tid_bitmap) - 1; ppdu_info->tcp_msdu_count = u32_get_bits(info[4], HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT); @@ -670,8 +657,8 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, &ppdu_info->userstats[userid]; ppdu_info->num_users += 1; - ath12k_dp_mon_rx_handle_ofdma_info(tlv_data, rxuser_stats); - ath12k_dp_mon_rx_populate_mu_user_info(tlv_data, ppdu_info, + ath12k_dp_mon_rx_handle_ofdma_info(eu_stats, rxuser_stats); + ath12k_dp_mon_rx_populate_mu_user_info(eu_stats, ppdu_info, rxuser_stats); } ppdu_info->mpdu_fcs_ok_bitmap[0] = __le32_to_cpu(eu_stats->rsvd1[0]); @@ -679,8 +666,8 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, break; } case HAL_RX_PPDU_END_USER_STATS_EXT: { - struct hal_rx_ppdu_end_user_stats_ext *eu_stats = - (struct hal_rx_ppdu_end_user_stats_ext *)tlv_data; + const struct hal_rx_ppdu_end_user_stats_ext *eu_stats = tlv_data; + ppdu_info->mpdu_fcs_ok_bitmap[2] = __le32_to_cpu(eu_stats->info1); ppdu_info->mpdu_fcs_ok_bitmap[3] = __le32_to_cpu(eu_stats->info2); ppdu_info->mpdu_fcs_ok_bitmap[4] = __le32_to_cpu(eu_stats->info3); @@ -726,40 +713,25 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, break; case HAL_PHYRX_RSSI_LEGACY: { - struct hal_rx_phyrx_rssi_legacy_info *rssi = - (struct hal_rx_phyrx_rssi_legacy_info *)tlv_data; - u32 reception_type = 0; - u32 rssi_legacy_info = __le32_to_cpu(rssi->rsvd[0]); + const struct hal_rx_phyrx_rssi_legacy_info *rssi = tlv_data; info[0] = __le32_to_cpu(rssi->info0); + info[1] = __le32_to_cpu(rssi->info1); /* TODO: Please note that the combined rssi will not be accurate * in MU case. Rssi in MU needs to be retrieved from * PHYRX_OTHER_RECEIVE_INFO TLV. 
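The PPDU start timestamp now spans two descriptor words (ppdu_start_ts_63_32 and ppdu_start_ts_31_0) instead of one. A sketch of what the ath12k_le32hilo_to_u64() call above is assumed to do; the body here is an assumption for illustration, not the driver's definition:

	static inline u64 ath12k_le32hilo_to_u64(__le32 hi, __le32 lo)
	{
		u64 hi64 = le32_to_cpu(hi);
		u64 lo64 = le32_to_cpu(lo);

		/* two little-endian 32-bit halves combined into one u64 */
		return (hi64 << 32) | lo64;
	}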
*/ ppdu_info->rssi_comb = - u32_get_bits(info[0], - HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB); - reception_type = - u32_get_bits(rssi_legacy_info, - HAL_RX_PHYRX_RSSI_LEGACY_INFO_RSVD1_RECEPTION); - - switch (reception_type) { - case HAL_RECEPTION_TYPE_ULOFMDA: - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA; - break; - case HAL_RECEPTION_TYPE_ULMIMO: - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO; - break; - default: - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; - break; - } + u32_get_bits(info[1], + HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB); + + ppdu_info->bw = u32_get_bits(info[0], + HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW); break; } case HAL_RXPCU_PPDU_END_INFO: { - struct hal_rx_ppdu_end_duration *ppdu_rx_duration = - (struct hal_rx_ppdu_end_duration *)tlv_data; + const struct hal_rx_ppdu_end_duration *ppdu_rx_duration = tlv_data; info[0] = __le32_to_cpu(ppdu_rx_duration->info0); ppdu_info->rx_duration = @@ -770,8 +742,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, break; } case HAL_RX_MPDU_START: { - struct hal_rx_mpdu_start *mpdu_start = - (struct hal_rx_mpdu_start *)tlv_data; + const struct hal_rx_mpdu_start *mpdu_start = tlv_data; struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu; u16 peer_id; @@ -799,9 +770,8 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, /* TODO: add msdu start parsing logic */ break; case HAL_MON_BUF_ADDR: { - struct dp_rxdma_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring; - struct dp_mon_packet_info *packet_info = - (struct dp_mon_packet_info *)tlv_data; + struct dp_rxdma_mon_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring; + const struct dp_mon_packet_info *packet_info = tlv_data; int buf_id = u32_get_bits(packet_info->cookie, DP_RXDMA_BUF_COOKIE_BUF_ID); struct sk_buff *msdu; @@ -833,8 +803,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, break; } case HAL_RX_MSDU_END: { - struct rx_msdu_end_qcn9274 *msdu_end = - (struct rx_msdu_end_qcn9274 *)tlv_data; + const struct rx_msdu_end_qcn9274 *msdu_end = tlv_data; bool is_first_msdu_in_mpdu; u16 msdu_end_info; @@ -862,27 +831,29 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, return HAL_RX_MON_STATUS_PPDU_NOT_DONE; } -static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar, struct sk_buff *msdu) +static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar, + struct sk_buff *head_msdu, + struct sk_buff *tail_msdu) { u32 rx_pkt_offset, l2_hdr_offset; - rx_pkt_offset = ar->ab->hw_params->hal_desc_sz; - l2_hdr_offset = ath12k_dp_rx_h_l3pad(ar->ab, - (struct hal_rx_desc *)msdu->data); - skb_pull(msdu, rx_pkt_offset + l2_hdr_offset); + rx_pkt_offset = ar->ab->hal.hal_desc_sz; + l2_hdr_offset = + ath12k_dp_rx_h_l3pad(ar->ab, (struct hal_rx_desc *)tail_msdu->data); + skb_pull(head_msdu, rx_pkt_offset + l2_hdr_offset); } static struct sk_buff * -ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, - u32 mac_id, struct sk_buff *head_msdu, +ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, u32 mac_id, + struct sk_buff *head_msdu, struct sk_buff *tail_msdu, struct ieee80211_rx_status *rxs, bool *fcs_err) { struct ath12k_base *ab = ar->ab; - struct sk_buff *msdu, *mpdu_buf, *prev_buf; - struct hal_rx_desc *rx_desc; + struct sk_buff *msdu, *mpdu_buf, *prev_buf, *head_frag_list; + struct hal_rx_desc *rx_desc, *tail_rx_desc; u8 *hdr_desc, *dest, decap_format; struct ieee80211_hdr_3addr *wh; - u32 err_bitmap; + u32 err_bitmap, frag_list_sum_len = 0; mpdu_buf = NULL; @@ -890,24 +861,30 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, 
goto err_merge_fail; rx_desc = (struct hal_rx_desc *)head_msdu->data; - err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc); + tail_rx_desc = (struct hal_rx_desc *)tail_msdu->data; + err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, tail_rx_desc); if (err_bitmap & HAL_RX_MPDU_ERR_FCS) *fcs_err = true; - decap_format = ath12k_dp_rx_h_decap_type(ab, rx_desc); + decap_format = ath12k_dp_rx_h_decap_type(ab, tail_rx_desc); - ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs); + ath12k_dp_rx_h_ppdu(ar, tail_rx_desc, rxs); if (decap_format == DP_RX_DECAP_TYPE_RAW) { - ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu); + ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu); prev_buf = head_msdu; msdu = head_msdu->next; + head_frag_list = NULL; while (msdu) { - ath12k_dp_mon_rx_msdus_set_payload(ar, msdu); + ath12k_dp_mon_rx_msdus_set_payload(ar, msdu, tail_msdu); + + if (!head_frag_list) + head_frag_list = msdu; + frag_list_sum_len += msdu->len; prev_buf = msdu; msdu = msdu->next; } @@ -915,11 +892,18 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, prev_buf->next = NULL; skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN); + if (head_frag_list) { + skb_shinfo(head_msdu)->frag_list = head_frag_list; + head_msdu->data_len = frag_list_sum_len; + head_msdu->len += head_msdu->data_len; + head_msdu->next = NULL; + } } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) { u8 qos_pkt = 0; rx_desc = (struct hal_rx_desc *)head_msdu->data; - hdr_desc = ab->hw_params->hal_ops->rx_desc_get_msdu_payload(rx_desc); + hdr_desc = + ab->hal_rx_ops->rx_desc_get_msdu_payload(rx_desc); /* Base size */ wh = (struct ieee80211_hdr_3addr *)hdr_desc; @@ -930,7 +914,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, msdu = head_msdu; while (msdu) { - ath12k_dp_mon_rx_msdus_set_payload(ar, msdu); + ath12k_dp_mon_rx_msdus_set_payload(ar, msdu, tail_msdu); if (qos_pkt) { dest = skb_push(msdu, sizeof(__le16)); if (!dest) @@ -945,7 +929,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, goto err_merge_fail; ath12k_dbg(ab, ATH12K_DBG_DATA, - "mpdu_buf %pK mpdu_buf->len %u", + "mpdu_buf %p mpdu_buf->len %u", prev_buf, prev_buf->len); } else { ath12k_dbg(ab, ATH12K_DBG_DATA, @@ -959,7 +943,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, err_merge_fail: if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) { ath12k_dbg(ab, ATH12K_DBG_DATA, - "err_merge_fail mpdu_buf %pK", mpdu_buf); + "err_merge_fail mpdu_buf %p", mpdu_buf); /* Free the head buffer */ dev_kfree_skb_any(mpdu_buf); } @@ -1088,12 +1072,18 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct decap = ath12k_dp_rx_h_decap_type(ar->ab, rxcb->rx_desc); spin_lock_bh(&ar->ab->base_lock); peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu); - if (peer && peer->sta) + if (peer && peer->sta) { pubsta = peer->sta; + if (pubsta->valid_links) { + status->link_valid = 1; + status->link_id = peer->link_id; + } + } + spin_unlock_bh(&ar->ab->base_lock); ath12k_dbg(ar->ab, ATH12K_DBG_DATA, - "rx skb %pK len %u peer %pM %u %s %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", + "rx skb %p len %u peer %pM %u %s %s%s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", msdu, msdu->len, peer ? peer->addr : NULL, @@ -1106,6 +1096,7 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct (status->bw == RATE_INFO_BW_40) ? "40" : "", (status->bw == RATE_INFO_BW_80) ? "80" : "", (status->bw == RATE_INFO_BW_160) ? 
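The RAW-decap merge above now chains every MSDU after the first onto the head skb via its frag_list instead of flattening the chain. A condensed sketch of the accounting the hunk performs once the loop has found the first fragment and summed the fragment lengths; it assumes each buffer already had its descriptor and pad bytes pulled:

	#include <linux/skbuff.h>

	static void chain_frag_list(struct sk_buff *head, struct sk_buff *frag_head,
				    u32 frags_len)
	{
		skb_shinfo(head)->frag_list = frag_head;

		/* data_len counts bytes outside head's linear area; len is total */
		head->data_len = frags_len;
		head->len += frags_len;
		head->next = NULL;
	}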
"160" : "", + (status->bw == RATE_INFO_BW_320) ? "320" : "", status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "", status->rate_idx, status->nss, @@ -1131,11 +1122,11 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED)) rx_status->flag |= RX_FLAG_8023; - ieee80211_rx_napi(ar->hw, pubsta, msdu, napi); + ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi); } static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id, - struct sk_buff *head_msdu, + struct sk_buff *head_msdu, struct sk_buff *tail_msdu, struct hal_rx_mon_ppdu_info *ppduinfo, struct napi_struct *napi) { @@ -1144,7 +1135,8 @@ static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id, struct ieee80211_rx_status *rxs = &dp->rx_status; bool fcs_err = false; - mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mac_id, head_msdu, + mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mac_id, + head_msdu, tail_msdu, rxs, &fcs_err); if (!mon_skb) goto mon_deliver_fail; @@ -1192,19 +1184,19 @@ ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon struct sk_buff *skb) { struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info; - struct hal_tlv_hdr *tlv; + struct hal_tlv_64_hdr *tlv; enum hal_rx_mon_status hal_status; - u32 tlv_userid = 0; + u32 tlv_userid; u16 tlv_tag, tlv_len; u8 *ptr = skb->data; memset(ppdu_info, 0, sizeof(struct hal_rx_mon_ppdu_info)); do { - tlv = (struct hal_tlv_hdr *)ptr; - tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG); - tlv_len = le32_get_bits(tlv->tl, HAL_TLV_HDR_LEN); - tlv_userid = le32_get_bits(tlv->tl, HAL_TLV_USR_ID); + tlv = (struct hal_tlv_64_hdr *)ptr; + tlv_tag = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG); + tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN); + tlv_userid = le64_get_bits(tlv->tl, HAL_TLV_64_USR_ID); ptr += sizeof(*tlv); /* The actual length of PPDU_END is the combined length of many PHY @@ -1219,7 +1211,7 @@ ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon hal_status = ath12k_dp_mon_rx_parse_status_tlv(ab, pmon, tlv_tag, ptr, tlv_userid); ptr += tlv_len; - ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN); + ptr = PTR_ALIGN(ptr, HAL_TLV_64_ALIGN); if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE) break; @@ -1252,7 +1244,7 @@ ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar, if (head_msdu && tail_msdu) { ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu, - ppdu_info, napi); + tail_msdu, ppdu_info, napi); } kfree(mon_mpdu); @@ -1261,7 +1253,7 @@ ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar, } int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab, - struct dp_rxdma_ring *buf_ring, + struct dp_rxdma_mon_ring *buf_ring, int req_entries) { struct hal_mon_buf_ring *mon_buf; @@ -1596,7 +1588,7 @@ ath12k_dp_mon_tx_gen_prot_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info) static enum dp_mon_tx_tlv_status ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab, struct ath12k_mon_data *pmon, - u16 tlv_tag, u8 *tlv_data, u32 userid) + u16 tlv_tag, const void *tlv_data, u32 userid) { struct dp_mon_tx_ppdu_info *tx_ppdu_info; enum dp_mon_tx_tlv_status status = DP_MON_TX_STATUS_PPDU_NOT_DONE; @@ -1606,8 +1598,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab, switch (tlv_tag) { case HAL_TX_FES_SETUP: { - struct hal_tx_fes_setup *tx_fes_setup = - (struct hal_tx_fes_setup *)tlv_data; + const struct hal_tx_fes_setup *tx_fes_setup = tlv_data; info[0] = __le32_to_cpu(tx_fes_setup->info0); tx_ppdu_info->ppdu_id = __le32_to_cpu(tx_fes_setup->schedule_id); 
@@ -1618,8 +1609,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab, } case HAL_TX_FES_STATUS_END: { - struct hal_tx_fes_status_end *tx_fes_status_end = - (struct hal_tx_fes_status_end *)tlv_data; + const struct hal_tx_fes_status_end *tx_fes_status_end = tlv_data; u32 tst_15_0, tst_31_16; info[0] = __le32_to_cpu(tx_fes_status_end->info0); @@ -1636,8 +1626,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab, } case HAL_RX_RESPONSE_REQUIRED_INFO: { - struct hal_rx_resp_req_info *rx_resp_req_info = - (struct hal_rx_resp_req_info *)tlv_data; + const struct hal_rx_resp_req_info *rx_resp_req_info = tlv_data; u32 addr_32; u16 addr_16; @@ -1682,8 +1671,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab, } case HAL_PCU_PPDU_SETUP_INIT: { - struct hal_tx_pcu_ppdu_setup_init *ppdu_setup = - (struct hal_tx_pcu_ppdu_setup_init *)tlv_data; + const struct hal_tx_pcu_ppdu_setup_init *ppdu_setup = tlv_data; u32 addr_32; u16 addr_16; @@ -1729,8 +1717,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab, } case HAL_TX_QUEUE_EXTENSION: { - struct hal_tx_queue_exten *tx_q_exten = - (struct hal_tx_queue_exten *)tlv_data; + const struct hal_tx_queue_exten *tx_q_exten = tlv_data; info[0] = __le32_to_cpu(tx_q_exten->info0); @@ -1742,8 +1729,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab, } case HAL_TX_FES_STATUS_START: { - struct hal_tx_fes_status_start *tx_fes_start = - (struct hal_tx_fes_status_start *)tlv_data; + const struct hal_tx_fes_status_start *tx_fes_start = tlv_data; info[0] = __le32_to_cpu(tx_fes_start->info0); @@ -1754,8 +1740,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab, } case HAL_TX_FES_STATUS_PROT: { - struct hal_tx_fes_status_prot *tx_fes_status = - (struct hal_tx_fes_status_prot *)tlv_data; + const struct hal_tx_fes_status_prot *tx_fes_status = tlv_data; u32 start_timestamp; u32 end_timestamp; @@ -1782,8 +1767,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab, case HAL_TX_FES_STATUS_START_PPDU: case HAL_TX_FES_STATUS_START_PROT: { - struct hal_tx_fes_status_start_prot *tx_fes_stat_start = - (struct hal_tx_fes_status_start_prot *)tlv_data; + const struct hal_tx_fes_status_start_prot *tx_fes_stat_start = tlv_data; u64 ppdu_ts; info[0] = __le32_to_cpu(tx_fes_stat_start->info0); @@ -1798,8 +1782,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab, } case HAL_TX_FES_STATUS_USER_PPDU: { - struct hal_tx_fes_status_user_ppdu *tx_fes_usr_ppdu = - (struct hal_tx_fes_status_user_ppdu *)tlv_data; + const struct hal_tx_fes_status_user_ppdu *tx_fes_usr_ppdu = tlv_data; info[0] = __le32_to_cpu(tx_fes_usr_ppdu->info0); @@ -1842,8 +1825,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab, break; case HAL_RX_FRAME_BITMAP_ACK: { - struct hal_rx_frame_bitmap_ack *fbm_ack = - (struct hal_rx_frame_bitmap_ack *)tlv_data; + const struct hal_rx_frame_bitmap_ack *fbm_ack = tlv_data; u32 addr_32; u16 addr_16; @@ -1861,8 +1843,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab, } case HAL_MACTX_PHY_DESC: { - struct hal_tx_phy_desc *tx_phy_desc = - (struct hal_tx_phy_desc *)tlv_data; + const struct hal_tx_phy_desc *tx_phy_desc = tlv_data; info[0] = __le32_to_cpu(tx_phy_desc->info0); info[1] = __le32_to_cpu(tx_phy_desc->info1); @@ -1903,43 +1884,6 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab, break; } - case HAL_MON_BUF_ADDR: { - struct dp_rxdma_ring *buf_ring = &ab->dp.tx_mon_buf_ring; - struct dp_mon_packet_info *packet_info = - (struct dp_mon_packet_info *)tlv_data; - int buf_id = 
u32_get_bits(packet_info->cookie, - DP_RXDMA_BUF_COOKIE_BUF_ID); - struct sk_buff *msdu; - struct dp_mon_mpdu *mon_mpdu = tx_ppdu_info->tx_mon_mpdu; - struct ath12k_skb_rxcb *rxcb; - - spin_lock_bh(&buf_ring->idr_lock); - msdu = idr_remove(&buf_ring->bufs_idr, buf_id); - spin_unlock_bh(&buf_ring->idr_lock); - - if (unlikely(!msdu)) { - ath12k_warn(ab, "monitor destination with invalid buf_id %d\n", - buf_id); - return DP_MON_TX_STATUS_PPDU_NOT_DONE; - } - - rxcb = ATH12K_SKB_RXCB(msdu); - dma_unmap_single(ab->dev, rxcb->paddr, - msdu->len + skb_tailroom(msdu), - DMA_FROM_DEVICE); - - if (!mon_mpdu->head) - mon_mpdu->head = msdu; - else if (mon_mpdu->tail) - mon_mpdu->tail->next = msdu; - - mon_mpdu->tail = msdu; - - ath12k_dp_mon_buf_replenish(ab, buf_ring, 1); - status = DP_MON_TX_BUFFER_ADDR; - break; - } - case HAL_TX_MPDU_END: list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list, &tx_ppdu_info->dp_tx_mon_mpdu_list); @@ -1985,15 +1929,16 @@ ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar, int mac_id, struct dp_mon_tx_ppdu_info *tx_ppdu_info) { struct dp_mon_mpdu *tmp, *mon_mpdu; - struct sk_buff *head_msdu; + struct sk_buff *head_msdu, *tail_msdu; list_for_each_entry_safe(mon_mpdu, tmp, &tx_ppdu_info->dp_tx_mon_mpdu_list, list) { list_del(&mon_mpdu->list); head_msdu = mon_mpdu->head; + tail_msdu = mon_mpdu->tail; if (head_msdu) - ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu, + ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu, tail_msdu, &tx_ppdu_info->rx_status, napi); kfree(mon_mpdu); @@ -2069,7 +2014,7 @@ int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, int *budget, struct ath12k_skb_rxcb *rxcb; struct dp_srng *mon_dst_ring; struct hal_srng *srng; - struct dp_rxdma_ring *buf_ring; + struct dp_rxdma_mon_ring *buf_ring; u64 cookie; u32 ppdu_id; int num_buffs_reaped = 0, srng_id, buf_id; @@ -2088,8 +2033,7 @@ int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, int *budget, mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id]; buf_ring = &dp->rxdma_mon_buf_ring; } else { - mon_dst_ring = &pdev_dp->tx_mon_dst_ring[srng_id]; - buf_ring = &dp->tx_mon_buf_ring; + return 0; } srng = &ab->hal.srng_list[mon_dst_ring->ring_id]; @@ -2203,7 +2147,7 @@ ath12k_dp_mon_rx_update_peer_rate_table_stats(struct ath12k_rx_peer_stats *rx_st } static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar, - struct ath12k_sta *arsta, + struct ath12k_link_sta *arsta, struct hal_rx_mon_ppdu_info *ppdu_info) { struct ath12k_rx_peer_stats *rx_stats = arsta->rx_stats; @@ -2359,7 +2303,8 @@ ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar, struct hal_rx_mon_ppdu_info *ppdu_info, u32 uid) { - struct ath12k_sta *arsta = NULL; + struct ath12k_sta *ahsta; + struct ath12k_link_sta *arsta; struct ath12k_rx_peer_stats *rx_stats = NULL; struct hal_rx_user_status *user_stats = &ppdu_info->userstats[uid]; struct ath12k_peer *peer; @@ -2376,7 +2321,8 @@ ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar, return; } - arsta = (struct ath12k_sta *)peer->sta->drv_priv; + ahsta = ath12k_sta_to_ahsta(peer->sta); + arsta = &ahsta->deflink; rx_stats = arsta->rx_stats; if (!rx_stats) @@ -2482,8 +2428,9 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id, struct ath12k_skb_rxcb *rxcb; struct dp_srng *mon_dst_ring; struct hal_srng *srng; - struct dp_rxdma_ring *buf_ring; - struct ath12k_sta *arsta = NULL; + struct dp_rxdma_mon_ring *buf_ring; + struct ath12k_sta *ahsta = NULL; + struct ath12k_link_sta *arsta; struct ath12k_peer *peer; u64 cookie; int num_buffs_reaped = 0, srng_id, buf_id; 
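Several stats paths above now go mac80211 sta -> ath12k_sta -> deflink instead of casting drv_priv directly to a link sta, matching the MLO split between the aggregate station and its per-link state. A sketch of that conversion, assuming ath12k_sta_to_ahsta() is the usual drv_priv cast:

	static struct ath12k_link_sta *sta_to_deflink(struct ieee80211_sta *sta)
	{
		/* assumed shape of the helper: drv_priv holds the MLO-aware sta */
		struct ath12k_sta *ahsta = (struct ath12k_sta *)sta->drv_priv;

		/* these rx-stats paths still operate on the default link only */
		return &ahsta->deflink;
	}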
@@ -2552,7 +2499,8 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id, } if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) { - arsta = (struct ath12k_sta *)peer->sta->drv_priv; + ahsta = ath12k_sta_to_ahsta(peer->sta); + arsta = &ahsta->deflink; ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta, ppdu_info); } else if ((ppdu_info->fc_valid) && @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH12K_DP_MON_H @@ -80,7 +80,7 @@ ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar, int mac_id, struct sk_buff *skb, struct napi_struct *napi); int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab, - struct dp_rxdma_ring *buf_ring, + struct dp_rxdma_mon_ring *buf_ring, int req_entries); int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, int *budget, enum dp_monitor_mode monitor_mode, @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <linux/ieee80211.h> @@ -17,40 +17,41 @@ #include "dp_tx.h" #include "peer.h" #include "dp_mon.h" +#include "debugfs_htt_stats.h" #define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ) static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab, struct hal_rx_desc *desc) { - if (!ab->hw_params->hal_ops->rx_desc_encrypt_valid(desc)) + if (!ab->hal_rx_ops->rx_desc_encrypt_valid(desc)) return HAL_ENCRYPT_TYPE_OPEN; - return ab->hw_params->hal_ops->rx_desc_get_encrypt_type(desc); + return ab->hal_rx_ops->rx_desc_get_encrypt_type(desc); } u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab, struct hal_rx_desc *desc) { - return ab->hw_params->hal_ops->rx_desc_get_decap_type(desc); + return ab->hal_rx_ops->rx_desc_get_decap_type(desc); } static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab, struct hal_rx_desc *desc) { - return ab->hw_params->hal_ops->rx_desc_get_mesh_ctl(desc); + return ab->hal_rx_ops->rx_desc_get_mesh_ctl(desc); } static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab, struct hal_rx_desc *desc) { - return ab->hw_params->hal_ops->rx_desc_get_mpdu_seq_ctl_vld(desc); + return ab->hal_rx_ops->rx_desc_get_mpdu_seq_ctl_vld(desc); } static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab, struct hal_rx_desc *desc) { - return ab->hw_params->hal_ops->rx_desc_get_mpdu_fc_valid(desc); + return ab->hal_rx_ops->rx_desc_get_mpdu_fc_valid(desc); } static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab, @@ -58,7 +59,7 @@ static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab, { struct ieee80211_hdr *hdr; - hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz); + hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz); return ieee80211_has_morefrags(hdr->frame_control); } @@ -67,156 +68,156 @@ static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab, { struct ieee80211_hdr *hdr; - hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz); + hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz); return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; } static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab, struct hal_rx_desc *desc) 
{ - return ab->hw_params->hal_ops->rx_desc_get_mpdu_start_seq_no(desc); + return ab->hal_rx_ops->rx_desc_get_mpdu_start_seq_no(desc); } static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab, struct hal_rx_desc *desc) { - return ab->hw_params->hal_ops->dp_rx_h_msdu_done(desc); + return ab->hal_rx_ops->dp_rx_h_msdu_done(desc); } static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab, struct hal_rx_desc *desc) { - return ab->hw_params->hal_ops->dp_rx_h_l4_cksum_fail(desc); + return ab->hal_rx_ops->dp_rx_h_l4_cksum_fail(desc); } static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab, struct hal_rx_desc *desc) { - return ab->hw_params->hal_ops->dp_rx_h_ip_cksum_fail(desc); + return ab->hal_rx_ops->dp_rx_h_ip_cksum_fail(desc); } static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab, struct hal_rx_desc *desc) { - return ab->hw_params->hal_ops->dp_rx_h_is_decrypted(desc); + return ab->hal_rx_ops->dp_rx_h_is_decrypted(desc); } u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab, struct hal_rx_desc *desc) { - return ab->hw_params->hal_ops->dp_rx_h_mpdu_err(desc); + return ab->hal_rx_ops->dp_rx_h_mpdu_err(desc); } static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab, struct hal_rx_desc *desc) { - return ab->hw_params->hal_ops->rx_desc_get_msdu_len(desc); + return ab->hal_rx_ops->rx_desc_get_msdu_len(desc); } static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab, struct hal_rx_desc *desc) { - return ab->hw_params->hal_ops->rx_desc_get_msdu_sgi(desc); + return ab->hal_rx_ops->rx_desc_get_msdu_sgi(desc); } static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab, struct hal_rx_desc *desc) { - return ab->hw_params->hal_ops->rx_desc_get_msdu_rate_mcs(desc); + return ab->hal_rx_ops->rx_desc_get_msdu_rate_mcs(desc); } static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab, struct hal_rx_desc *desc) { - return ab->hw_params->hal_ops->rx_desc_get_msdu_rx_bw(desc); + return ab->hal_rx_ops->rx_desc_get_msdu_rx_bw(desc); } static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab, struct hal_rx_desc *desc) { - return ab->hw_params->hal_ops->rx_desc_get_msdu_freq(desc); + return ab->hal_rx_ops->rx_desc_get_msdu_freq(desc); } static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab, struct hal_rx_desc *desc) { - return ab->hw_params->hal_ops->rx_desc_get_msdu_pkt_type(desc); + return ab->hal_rx_ops->rx_desc_get_msdu_pkt_type(desc); } static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab, struct hal_rx_desc *desc) { - return hweight8(ab->hw_params->hal_ops->rx_desc_get_msdu_nss(desc)); + return hweight8(ab->hal_rx_ops->rx_desc_get_msdu_nss(desc)); } static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab, struct hal_rx_desc *desc) { - return ab->hw_params->hal_ops->rx_desc_get_mpdu_tid(desc); + return ab->hal_rx_ops->rx_desc_get_mpdu_tid(desc); } static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab, struct hal_rx_desc *desc) { - return ab->hw_params->hal_ops->rx_desc_get_mpdu_peer_id(desc); + return ab->hal_rx_ops->rx_desc_get_mpdu_peer_id(desc); } u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab, struct hal_rx_desc *desc) { - return ab->hw_params->hal_ops->rx_desc_get_l3_pad_bytes(desc); + return ab->hal_rx_ops->rx_desc_get_l3_pad_bytes(desc); } static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab, struct hal_rx_desc *desc) { - return ab->hw_params->hal_ops->rx_desc_get_first_msdu(desc); + return ab->hal_rx_ops->rx_desc_get_first_msdu(desc); } static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab, struct hal_rx_desc *desc) { - return 
ab->hw_params->hal_ops->rx_desc_get_last_msdu(desc); + return ab->hal_rx_ops->rx_desc_get_last_msdu(desc); } static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab, struct hal_rx_desc *fdesc, struct hal_rx_desc *ldesc) { - ab->hw_params->hal_ops->rx_desc_copy_end_tlv(fdesc, ldesc); + ab->hal_rx_ops->rx_desc_copy_end_tlv(fdesc, ldesc); } static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab, struct hal_rx_desc *desc, u16 len) { - ab->hw_params->hal_ops->rx_desc_set_msdu_len(desc, len); + ab->hal_rx_ops->rx_desc_set_msdu_len(desc, len); } static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab, struct hal_rx_desc *desc) { return (ath12k_dp_rx_h_first_msdu(ab, desc) && - ab->hw_params->hal_ops->rx_desc_is_da_mcbc(desc)); + ab->hal_rx_ops->rx_desc_is_da_mcbc(desc)); } static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab, struct hal_rx_desc *desc) { - return ab->hw_params->hal_ops->rx_desc_mac_addr2_valid(desc); + return ab->hal_rx_ops->rx_desc_mac_addr2_valid(desc); } static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab, struct hal_rx_desc *desc) { - return ab->hw_params->hal_ops->rx_desc_mpdu_start_addr2(desc); + return ab->hal_rx_ops->rx_desc_mpdu_start_addr2(desc); } static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab, struct hal_rx_desc *desc, struct ieee80211_hdr *hdr) { - ab->hw_params->hal_ops->rx_desc_get_dot11_hdr(desc, hdr); + ab->hal_rx_ops->rx_desc_get_dot11_hdr(desc, hdr); } static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab, @@ -224,54 +225,88 @@ static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab, u8 *crypto_hdr, enum hal_encrypt_type enctype) { - ab->hw_params->hal_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype); + ab->hal_rx_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype); } static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab, struct hal_rx_desc *desc) { - return ab->hw_params->hal_ops->rx_desc_get_mpdu_frame_ctl(desc); + return ab->hal_rx_ops->rx_desc_get_mpdu_frame_ctl(desc); } -static int ath12k_dp_purge_mon_ring(struct ath12k_base *ab) +static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab, + struct hal_rx_desc *desc) { - int i, reaped = 0; - unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS); + return ab->hal_rx_ops->rx_desc_get_msdu_src_link_id(desc); +} + +static void ath12k_dp_clean_up_skb_list(struct sk_buff_head *skb_list) +{ + struct sk_buff *skb; + + while ((skb = __skb_dequeue(skb_list))) + dev_kfree_skb_any(skb); +} + +static size_t ath12k_dp_list_cut_nodes(struct list_head *list, + struct list_head *head, + size_t count) +{ + struct list_head *cur; + struct ath12k_rx_desc_info *rx_desc; + size_t nodes = 0; + + if (!count) { + INIT_LIST_HEAD(list); + goto out; + } + + list_for_each(cur, head) { + if (!count) + break; + + rx_desc = list_entry(cur, struct ath12k_rx_desc_info, list); + rx_desc->in_use = true; - do { - for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) - reaped += ath12k_dp_mon_process_ring(ab, i, NULL, - DP_MON_SERVICE_BUDGET, - ATH12K_DP_RX_MONITOR_MODE); + count--; + nodes++; + } - /* nothing more to reap */ - if (reaped < DP_MON_SERVICE_BUDGET) - return 0; + list_cut_before(list, head, cur); +out: + return nodes; +} - } while (time_before(jiffies, timeout)); +static void ath12k_dp_rx_enqueue_free(struct ath12k_dp *dp, + struct list_head *used_list) +{ + struct ath12k_rx_desc_info *rx_desc, *safe; - ath12k_warn(ab, "dp mon ring purge timeout"); + /* 
Reset the use flag */ + list_for_each_entry_safe(rx_desc, safe, used_list, list) + rx_desc->in_use = false; - return -ETIMEDOUT; + spin_lock_bh(&dp->rx_desc_lock); + list_splice_tail(used_list, &dp->rx_desc_free_list); + spin_unlock_bh(&dp->rx_desc_lock); } /* Returns number of Rx buffers replenished */ -int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, int mac_id, +int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, struct dp_rxdma_ring *rx_ring, - int req_entries, - enum hal_rx_buf_return_buf_manager mgr, - bool hw_cc) + struct list_head *used_list, + int req_entries) { struct ath12k_buffer_addr *desc; struct hal_srng *srng; struct sk_buff *skb; int num_free; int num_remain; - int buf_id; u32 cookie; dma_addr_t paddr; struct ath12k_dp *dp = &ab->dp; struct ath12k_rx_desc_info *rx_desc; + enum hal_rx_buf_return_buf_manager mgr = ab->hw_params->hal_params->rx_buf_rbm; req_entries = min(req_entries, rx_ring->bufs_max); @@ -288,6 +323,19 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, int mac_id, req_entries = min(num_free, req_entries); num_remain = req_entries; + if (!num_remain) + goto out; + + /* Get the descriptor from free list */ + if (list_empty(used_list)) { + spin_lock_bh(&dp->rx_desc_lock); + req_entries = ath12k_dp_list_cut_nodes(used_list, + &dp->rx_desc_free_list, + num_remain); + spin_unlock_bh(&dp->rx_desc_lock); + num_remain = req_entries; + } + while (num_remain > 0) { skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + DP_RX_BUFFER_ALIGN_SIZE); @@ -307,46 +355,20 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, int mac_id, if (dma_mapping_error(ab->dev, paddr)) goto fail_free_skb; - if (hw_cc) { - spin_lock_bh(&dp->rx_desc_lock); - - /* Get desc from free list and store in used list - * for cleanup purposes - * - * TODO: pass the removed descs rather than - * add/read to optimize - */ - rx_desc = list_first_entry_or_null(&dp->rx_desc_free_list, - struct ath12k_rx_desc_info, - list); - if (!rx_desc) { - spin_unlock_bh(&dp->rx_desc_lock); - goto fail_dma_unmap; - } - - rx_desc->skb = skb; - cookie = rx_desc->cookie; - list_del(&rx_desc->list); - list_add_tail(&rx_desc->list, &dp->rx_desc_used_list); + rx_desc = list_first_entry_or_null(used_list, + struct ath12k_rx_desc_info, + list); + if (!rx_desc) + goto fail_dma_unmap; - spin_unlock_bh(&dp->rx_desc_lock); - } else { - spin_lock_bh(&rx_ring->idr_lock); - buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, - rx_ring->bufs_max * 3, GFP_ATOMIC); - spin_unlock_bh(&rx_ring->idr_lock); - if (buf_id < 0) - goto fail_dma_unmap; - cookie = u32_encode_bits(mac_id, - DP_RXDMA_BUF_COOKIE_PDEV_ID) | - u32_encode_bits(buf_id, - DP_RXDMA_BUF_COOKIE_BUF_ID); - } + rx_desc->skb = skb; + cookie = rx_desc->cookie; desc = ath12k_hal_srng_src_get_next_entry(ab, srng); if (!desc) - goto fail_buf_unassign; + goto fail_dma_unmap; + list_del(&rx_desc->list); ATH12K_SKB_RXCB(skb)->paddr = paddr; num_remain--; @@ -354,39 +376,26 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, int mac_id, ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); } - ath12k_hal_srng_access_end(ab, srng); - - spin_unlock_bh(&srng->lock); - - return req_entries - num_remain; + goto out; -fail_buf_unassign: - if (hw_cc) { - spin_lock_bh(&dp->rx_desc_lock); - list_del(&rx_desc->list); - list_add_tail(&rx_desc->list, &dp->rx_desc_free_list); - rx_desc->skb = NULL; - spin_unlock_bh(&dp->rx_desc_lock); - } else { - spin_lock_bh(&rx_ring->idr_lock); - idr_remove(&rx_ring->bufs_idr, buf_id); - spin_unlock_bh(&rx_ring->idr_lock); - } fail_dma_unmap: 
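ath12k_dp_list_cut_nodes() above batches free descriptors off the global free list with one O(1) splice instead of per-node del/add under the lock. The pattern, sketched with a toy node type standing in for ath12k_rx_desc_info (the real code also short-circuits count == 0 up front):

	#include <linux/list.h>

	struct toy_desc {
		struct list_head list;
		bool in_use;
	};

	static size_t take_descs(struct list_head *out, struct list_head *free_list,
				 size_t count)
	{
		struct list_head *cur;
		size_t taken = 0;

		list_for_each(cur, free_list) {
			if (taken == count)
				break;
			list_entry(cur, struct toy_desc, list)->in_use = true;
			taken++;
		}

		/* detach every entry strictly before 'cur' onto 'out' in O(1) */
		list_cut_before(out, free_list, cur);
		return taken;
	}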
dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); fail_free_skb: dev_kfree_skb_any(skb); - +out: ath12k_hal_srng_access_end(ab, srng); + if (!list_empty(used_list)) + ath12k_dp_rx_enqueue_free(dp, used_list); + spin_unlock_bh(&srng->lock); return req_entries - num_remain; } -static int ath12k_dp_rxdma_buf_ring_free(struct ath12k_base *ab, - struct dp_rxdma_ring *rx_ring) +static int ath12k_dp_rxdma_mon_buf_ring_free(struct ath12k_base *ab, + struct dp_rxdma_mon_ring *rx_ring) { struct sk_buff *skb; int buf_id; @@ -411,22 +420,15 @@ static int ath12k_dp_rxdma_buf_ring_free(struct ath12k_base *ab, static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab) { struct ath12k_dp *dp = &ab->dp; - struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; - - ath12k_dp_rxdma_buf_ring_free(ab, rx_ring); - - rx_ring = &dp->rxdma_mon_buf_ring; - ath12k_dp_rxdma_buf_ring_free(ab, rx_ring); - rx_ring = &dp->tx_mon_buf_ring; - ath12k_dp_rxdma_buf_ring_free(ab, rx_ring); + ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring); return 0; } -static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab, - struct dp_rxdma_ring *rx_ring, - u32 ringtype) +static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab, + struct dp_rxdma_mon_ring *rx_ring, + u32 ringtype) { int num_entries; @@ -434,23 +436,30 @@ static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab, ath12k_hal_srng_get_entrysize(ab, ringtype); rx_ring->bufs_max = num_entries; - if ((ringtype == HAL_RXDMA_MONITOR_BUF) || (ringtype == HAL_TX_MONITOR_BUF)) - ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries); - else - ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_entries, - ab->hw_params->hal_params->rx_buf_rbm, - ringtype == HAL_RXDMA_BUF); + ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries); + + return 0; +} + +static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab, + struct dp_rxdma_ring *rx_ring) +{ + LIST_HEAD(list); + + rx_ring->bufs_max = rx_ring->refill_buf_ring.size / + ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF); + + ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0); + return 0; } static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab) { struct ath12k_dp *dp = &ab->dp; - struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; int ret; - ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring, - HAL_RXDMA_BUF); + ret = ath12k_dp_rxdma_ring_buf_setup(ab, &dp->rx_refill_buf_ring); if (ret) { ath12k_warn(ab, "failed to setup HAL_RXDMA_BUF\n"); @@ -458,23 +467,14 @@ static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab) } if (ab->hw_params->rxdma1_enable) { - rx_ring = &dp->rxdma_mon_buf_ring; - ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring, - HAL_RXDMA_MONITOR_BUF); + ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab, + &dp->rxdma_mon_buf_ring, + HAL_RXDMA_MONITOR_BUF); if (ret) { ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n"); return ret; } - - rx_ring = &dp->tx_mon_buf_ring; - ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring, - HAL_TX_MONITOR_BUF); - if (ret) { - ath12k_warn(ab, - "failed to setup HAL_TX_MONITOR_BUF\n"); - return ret; - } } return 0; @@ -486,10 +486,8 @@ static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar) struct ath12k_base *ab = ar->ab; int i; - for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]); - ath12k_dp_srng_cleanup(ab, &dp->tx_mon_dst_ring[i]); - } } void ath12k_dp_rx_pdev_reo_cleanup(struct 
ath12k_base *ab) @@ -533,7 +531,7 @@ static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar) int ret; u32 mac_id = dp->mac_id; - for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { ret = ath12k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring[i], HAL_RXDMA_MONITOR_DST, @@ -544,17 +542,6 @@ static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar) "failed to setup HAL_RXDMA_MONITOR_DST\n"); return ret; } - - ret = ath12k_dp_srng_setup(ar->ab, - &dp->tx_mon_dst_ring[i], - HAL_TX_MONITOR_DST, - 0, mac_id + i, - DP_TX_MONITOR_DEST_RING_SIZE); - if (ret) { - ath12k_warn(ar->ab, - "failed to setup HAL_TX_MONITOR_DST\n"); - return ret; - } } return 0; @@ -753,15 +740,22 @@ static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u { struct ath12k_reo_queue_ref *qref; struct ath12k_dp *dp = &ab->dp; + bool ml_peer = false; if (!ab->hw_params->reoq_lut_support) return; - /* TODO: based on ML peer or not, select the LUT. below assumes non - * ML peer - */ - qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr + - (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); + if (peer_id & ATH12K_PEER_ML_ID_VALID) { + peer_id &= ~ATH12K_PEER_ML_ID_VALID; + ml_peer = true; + } + + if (ml_peer) + qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr + + (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); + else + qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr + + (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); qref->info0 = u32_encode_bits(lower_32_bits(paddr), BUFFER_ADDR_INFO0_ADDR); @@ -774,15 +768,22 @@ static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u { struct ath12k_reo_queue_ref *qref; struct ath12k_dp *dp = &ab->dp; + bool ml_peer = false; if (!ab->hw_params->reoq_lut_support) return; - /* TODO: based on ML peer or not, select the LUT. below assumes non - * ML peer - */ - qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr + - (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); + if (peer_id & ATH12K_PEER_ML_ID_VALID) { + peer_id &= ~ATH12K_PEER_ML_ID_VALID; + ml_peer = true; + } + + if (ml_peer) + qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr + + (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); + else + qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr + + (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR); qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) | @@ -815,7 +816,10 @@ void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar, rx_tid->vaddr = NULL; } - ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid); + if (peer->mlo) + ath12k_peer_rx_tid_qref_reset(ar->ab, peer->ml_id, tid); + else + ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid); rx_tid->active = false; } @@ -953,7 +957,13 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_ return -ENOENT; } - if (ab->hw_params->reoq_lut_support && !dp->reoq_lut.vaddr) { + if (!peer->primary_link) { + spin_unlock_bh(&ab->base_lock); + return 0; + } + + if (ab->hw_params->reoq_lut_support && + (!dp->reoq_lut.vaddr || !dp->ml_reoq_lut.vaddr)) { spin_unlock_bh(&ab->base_lock); ath12k_warn(ab, "reo qref table is not setup\n"); return -EINVAL; @@ -1034,7 +1044,11 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_ /* Update the REO queue LUT at the corresponding peer id * and tid with qaddr. 
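The qref setup and reset hunks that follow compute the same queue-reference slot; condensed here into one helper for clarity (a sketch, not the driver's code). Each peer owns IEEE80211_NUM_TIDS + 1 consecutive LUT entries, so the slot for (peer, tid) is a plain row/column computation, and ML peers index a second LUT once the validity bit is stripped from the id:

	static struct ath12k_reo_queue_ref *
	peer_tid_qref(struct ath12k_dp *dp, u16 peer_id, u8 tid)
	{
		struct ath12k_reo_q_addr_lut *lut = &dp->reoq_lut;

		if (peer_id & ATH12K_PEER_ML_ID_VALID) {
			peer_id &= ~ATH12K_PEER_ML_ID_VALID;
			lut = &dp->ml_reoq_lut;	/* ML peers use the second table */
		}

		return (struct ath12k_reo_queue_ref *)lut->vaddr +
		       peer_id * (IEEE80211_NUM_TIDS + 1) + tid;
	}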
*/ - ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr); + if (peer->mlo) + ath12k_peer_rx_tid_qref_setup(ab, peer->ml_id, tid, paddr); + else + ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr); + spin_unlock_bh(&ab->base_lock); } else { spin_unlock_bh(&ab->base_lock); @@ -1051,16 +1065,27 @@ err_mem_free: } int ath12k_dp_rx_ampdu_start(struct ath12k *ar, - struct ieee80211_ampdu_params *params) + struct ieee80211_ampdu_params *params, + u8 link_id) { struct ath12k_base *ab = ar->ab; - struct ath12k_sta *arsta = (void *)params->sta->drv_priv; - int vdev_id = arsta->arvif->vdev_id; + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta); + struct ath12k_link_sta *arsta; + int vdev_id; int ret; - ret = ath12k_dp_rx_peer_tid_setup(ar, params->sta->addr, vdev_id, + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy, + ahsta->link[link_id]); + if (!arsta) + return -ENOLINK; + + vdev_id = arsta->arvif->vdev_id; + + ret = ath12k_dp_rx_peer_tid_setup(ar, arsta->addr, vdev_id, params->tid, params->buf_size, - params->ssn, arsta->pn_type); + params->ssn, arsta->ahsta->pn_type); if (ret) ath12k_warn(ab, "failed to setup rx tid %d\n", ret); @@ -1068,18 +1093,29 @@ int ath12k_dp_rx_ampdu_start(struct ath12k *ar, } int ath12k_dp_rx_ampdu_stop(struct ath12k *ar, - struct ieee80211_ampdu_params *params) + struct ieee80211_ampdu_params *params, + u8 link_id) { struct ath12k_base *ab = ar->ab; struct ath12k_peer *peer; - struct ath12k_sta *arsta = (void *)params->sta->drv_priv; - int vdev_id = arsta->arvif->vdev_id; + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta); + struct ath12k_link_sta *arsta; + int vdev_id; bool active; int ret; + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy, + ahsta->link[link_id]); + if (!arsta) + return -ENOLINK; + + vdev_id = arsta->arvif->vdev_id; + spin_lock_bh(&ab->base_lock); - peer = ath12k_peer_find(ab, vdev_id, params->sta->addr); + peer = ath12k_peer_find(ab, vdev_id, arsta->addr); if (!peer) { spin_unlock_bh(&ab->base_lock); ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n"); @@ -1104,7 +1140,7 @@ int ath12k_dp_rx_ampdu_stop(struct ath12k *ar, return ret; } -int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif, +int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif, const u8 *peer_addr, enum set_key_cmd key_cmd, struct ieee80211_key_conf *key) @@ -1282,10 +1318,10 @@ static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab, return 0; } -static int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len, - int (*iter)(struct ath12k_base *ar, u16 tag, u16 len, - const void *ptr, void *data), - void *data) +int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len, + int (*iter)(struct ath12k_base *ar, u16 tag, u16 len, + const void *ptr, void *data), + void *data) { const struct htt_tlv *tlv; const void *begin = ptr; @@ -1326,7 +1362,8 @@ ath12k_update_per_peer_tx_stats(struct ath12k *ar, struct ath12k_base *ab = ar->ab; struct ath12k_peer *peer; struct ieee80211_sta *sta; - struct ath12k_sta *arsta; + struct ath12k_sta *ahsta; + struct ath12k_link_sta *arsta; struct htt_ppdu_stats_user_rate *user_rate; struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats; struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user]; @@ -1339,9 +1376,6 @@ ath12k_update_per_peer_tx_stats(struct ath12k *ar, u8 tid = 
HTT_PPDU_STATS_NON_QOS_TID; bool is_ampdu = false; - if (!usr_stats) - return; - if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE))) return; @@ -1410,7 +1444,8 @@ ath12k_update_per_peer_tx_stats(struct ath12k *ar, } sta = peer->sta; - arsta = (struct ath12k_sta *)sta->drv_priv; + ahsta = ath12k_sta_to_ahsta(sta); + arsta = &ahsta->deflink; memset(&arsta->txrate, 0, sizeof(arsta->txrate)); @@ -1555,6 +1590,13 @@ static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab, msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data; len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE); + if (len > (skb->len - struct_size(msg, data, 0))) { + ath12k_warn(ab, + "HTT PPDU STATS event has unexpected payload size %u, should be smaller than %u\n", + len, skb->len); + return -EINVAL; + } + pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID); ppdu_id = le32_to_cpu(msg->ppdu_id); @@ -1583,6 +1625,16 @@ static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab, goto exit; } + if (ppdu_info->ppdu_stats.common.num_users >= HTT_PPDU_STATS_MAX_USERS) { + spin_unlock_bh(&ar->data_lock); + ath12k_warn(ab, + "HTT PPDU STATS event has unexpected num_users %u, should be smaller than %u\n", + ppdu_info->ppdu_stats.common.num_users, + HTT_PPDU_STATS_MAX_USERS); + ret = -EINVAL; + goto exit; + } + /* back up data rate tlv for all peers */ if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA && (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) && @@ -1641,11 +1693,16 @@ static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab, msg = (struct ath12k_htt_mlo_offset_msg *)skb->data; pdev_id = u32_get_bits(__le32_to_cpu(msg->info), HTT_T2H_MLO_OFFSET_INFO_PDEV_ID); - ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id); + rcu_read_lock(); + ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id); if (!ar) { - ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id); - return; + /* It is possible that the ar is not yet active (started). + * The above function will only look for the active pdev + * and hence %NULL return is possible. 
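The PPDU-stats length check added above relies on struct_size(msg, data, 0) yielding just the fixed header size, since a flexible array contributes nothing at count 0; the advertised payload must then fit in whatever remains of the skb. A sketch with illustrative types (the header check here is extra; the driver's transport guarantees a minimum length):

	#include <linux/overflow.h>
	#include <linux/skbuff.h>

	struct stats_msg {
		__le32 info;
		__le32 ppdu_id;
		u8 data[];	/* payload, length advertised in info */
	};

	static bool stats_payload_fits(const struct sk_buff *skb, u32 payload_len)
	{
		const struct stats_msg *msg = (const void *)skb->data;

		if (skb->len < struct_size(msg, data, 0))
			return false;	/* not even a full fixed header */

		return payload_len <= skb->len - struct_size(msg, data, 0);
	}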
Just silently + * discard this message + */ + goto exit; } spin_lock_bh(&ar->data_lock); @@ -1661,6 +1718,8 @@ static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab, pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer); spin_unlock_bh(&ar->data_lock); +exit: + rcu_read_unlock(); } void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab, @@ -1738,6 +1797,7 @@ void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab, ath12k_htt_pull_ppdu_stats(ab, skb); break; case HTT_T2H_MSG_TYPE_EXT_STATS_CONF: + ath12k_debugfs_htt_ext_stats_handler(ab, skb); break; case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND: ath12k_htt_mlo_offset_event_handler(ab, skb); @@ -1762,7 +1822,7 @@ static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar, int buf_first_hdr_len, buf_first_len; struct hal_rx_desc *ldesc; int space_extra, rem_len, buf_len; - u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz; + u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; /* As the msdu is spread across multiple rx buffers, * find the offset to the start of msdu for computing @@ -2356,8 +2416,10 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc, channel_num = meta_data; center_freq = meta_data >> 16; - if (center_freq >= 5935 && center_freq <= 7105) { + if (center_freq >= ATH12K_MIN_6G_FREQ && + center_freq <= ATH12K_MAX_6G_FREQ) { rx_status->band = NL80211_BAND_6GHZ; + rx_status->freq = center_freq; } else if (channel_num >= 1 && channel_num <= 14) { rx_status->band = NL80211_BAND_2GHZ; } else if (channel_num >= 36 && channel_num <= 173) { @@ -2375,8 +2437,9 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc, rx_desc, sizeof(*rx_desc)); } - rx_status->freq = ieee80211_channel_to_frequency(channel_num, - rx_status->band); + if (rx_status->band != NL80211_BAND_6GHZ) + rx_status->freq = ieee80211_channel_to_frequency(channel_num, + rx_status->band); ath12k_dp_rx_h_rate(ar, rx_desc, rx_status); } @@ -2415,10 +2478,15 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap pubsta = peer ? peer->sta : NULL; + if (pubsta && pubsta->valid_links) { + status->link_valid = 1; + status->link_id = peer->link_id; + } + spin_unlock_bh(&ab->base_lock); ath12k_dbg(ab, ATH12K_DBG_DATA, - "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", + "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", msdu, msdu->len, peer ? peer->addr : NULL, @@ -2432,6 +2500,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap (status->bw == RATE_INFO_BW_40) ? "40" : "", (status->bw == RATE_INFO_BW_80) ? "80" : "", (status->bw == RATE_INFO_BW_160) ? "160" : "", + (status->bw == RATE_INFO_BW_320) ? "320" : "", status->enc_flags & RX_ENC_FLAG_SHORT_GI ? 
"sgi " : "", status->rate_idx, status->nss, @@ -2458,7 +2527,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED)) rx_status->flag |= RX_FLAG_8023; - ieee80211_rx_napi(ar->hw, pubsta, msdu, napi); + ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi); } static int ath12k_dp_rx_process_msdu(struct ath12k *ar, @@ -2473,7 +2542,7 @@ static int ath12k_dp_rx_process_msdu(struct ath12k *ar, u8 l3_pad_bytes; u16 msdu_len; int ret; - u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz; + u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu); if (!last_buf) { @@ -2535,11 +2604,14 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab, struct sk_buff_head *msdu_list, int ring_id) { + struct ath12k_hw_group *ag = ab->ag; struct ieee80211_rx_status rx_status = {0}; struct ath12k_skb_rxcb *rxcb; struct sk_buff *msdu; struct ath12k *ar; - u8 mac_id, pdev_id; + struct ath12k_hw_link *hw_links = ag->hw_links; + struct ath12k_base *partner_ab; + u8 hw_link_id, pdev_id; int ret; if (skb_queue_empty(msdu_list)) @@ -2549,15 +2621,18 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab, while ((msdu = __skb_dequeue(msdu_list))) { rxcb = ATH12K_SKB_RXCB(msdu); - mac_id = rxcb->mac_id; - pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id); - ar = ab->pdevs[pdev_id].ar; - if (!rcu_dereference(ab->pdevs_active[pdev_id])) { + hw_link_id = rxcb->hw_link_id; + partner_ab = ath12k_ag_to_ab(ag, + hw_links[hw_link_id].device_id); + pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params, + hw_links[hw_link_id].pdev_idx); + ar = partner_ab->pdevs[pdev_id].ar; + if (!rcu_dereference(partner_ab->pdevs_active[pdev_id])) { dev_kfree_skb_any(msdu); continue; } - if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) { + if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) { dev_kfree_skb_any(msdu); continue; } @@ -2576,25 +2651,55 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab, rcu_read_unlock(); } +static u16 ath12k_dp_rx_get_peer_id(struct ath12k_base *ab, + enum ath12k_peer_metadata_version ver, + __le32 peer_metadata) +{ + switch (ver) { + default: + ath12k_warn(ab, "Unknown peer metadata version: %d", ver); + fallthrough; + case ATH12K_PEER_METADATA_V0: + return le32_get_bits(peer_metadata, + RX_MPDU_DESC_META_DATA_V0_PEER_ID); + case ATH12K_PEER_METADATA_V1: + return le32_get_bits(peer_metadata, + RX_MPDU_DESC_META_DATA_V1_PEER_ID); + case ATH12K_PEER_METADATA_V1A: + return le32_get_bits(peer_metadata, + RX_MPDU_DESC_META_DATA_V1A_PEER_ID); + case ATH12K_PEER_METADATA_V1B: + return le32_get_bits(peer_metadata, + RX_MPDU_DESC_META_DATA_V1B_PEER_ID); + } +} + int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id, struct napi_struct *napi, int budget) { + struct ath12k_hw_group *ag = ab->ag; + struct list_head rx_desc_used_list[ATH12K_MAX_SOCS]; + struct ath12k_hw_link *hw_links = ag->hw_links; + int num_buffs_reaped[ATH12K_MAX_SOCS] = {}; struct ath12k_rx_desc_info *desc_info; struct ath12k_dp *dp = &ab->dp; struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; struct hal_reo_dest_ring *desc; - int num_buffs_reaped = 0; + struct ath12k_base *partner_ab; struct sk_buff_head msdu_list; struct ath12k_skb_rxcb *rxcb; int total_msdu_reaped = 0; + u8 hw_link_id, device_id; struct hal_srng *srng; struct sk_buff *msdu; bool done = false; - int mac_id; u64 desc_va; __skb_queue_head_init(&msdu_list); + 
for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) + INIT_LIST_HEAD(&rx_desc_used_list[device_id]); + srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id]; spin_lock_bh(&srng->lock); @@ -2603,24 +2708,38 @@ try_again: ath12k_hal_srng_access_begin(ab, srng); while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { + struct rx_mpdu_desc *mpdu_info; + struct rx_msdu_desc *msdu_info; enum hal_reo_dest_ring_push_reason push_reason; u32 cookie; cookie = le32_get_bits(desc->buf_addr_info.info1, BUFFER_ADDR_INFO1_SW_COOKIE); - mac_id = le32_get_bits(desc->info0, - HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); + hw_link_id = le32_get_bits(desc->info0, + HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 | le32_to_cpu(desc->buf_va_lo)); desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va); + device_id = hw_links[hw_link_id].device_id; + partner_ab = ath12k_ag_to_ab(ag, device_id); + if (unlikely(!partner_ab)) { + if (desc_info->skb) { + dev_kfree_skb_any(desc_info->skb); + desc_info->skb = NULL; + } + + continue; + } + /* retry manual desc retrieval */ if (!desc_info) { - desc_info = ath12k_dp_get_rx_desc(ab, cookie); + desc_info = ath12k_dp_get_rx_desc(partner_ab, cookie); if (!desc_info) { - ath12k_warn(ab, "Invalid cookie in manual desc retrieval"); + ath12k_warn(partner_ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n", + cookie); continue; } } @@ -2631,36 +2750,37 @@ try_again: msdu = desc_info->skb; desc_info->skb = NULL; - spin_lock_bh(&dp->rx_desc_lock); - list_move_tail(&desc_info->list, &dp->rx_desc_free_list); - spin_unlock_bh(&dp->rx_desc_lock); + list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]); rxcb = ATH12K_SKB_RXCB(msdu); - dma_unmap_single(ab->dev, rxcb->paddr, + dma_unmap_single(partner_ab->dev, rxcb->paddr, msdu->len + skb_tailroom(msdu), DMA_FROM_DEVICE); - num_buffs_reaped++; + num_buffs_reaped[device_id]++; push_reason = le32_get_bits(desc->info0, HAL_REO_DEST_RING_INFO0_PUSH_REASON); if (push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) { dev_kfree_skb_any(msdu); - ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++; + ab->soc_stats.hal_reo_error[ring_id]++; continue; } - rxcb->is_first_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) & + msdu_info = &desc->rx_msdu_info; + mpdu_info = &desc->rx_mpdu_info; + + rxcb->is_first_msdu = !!(le32_to_cpu(msdu_info->info0) & RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); - rxcb->is_last_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) & + rxcb->is_last_msdu = !!(le32_to_cpu(msdu_info->info0) & RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); - rxcb->is_continuation = !!(le32_to_cpu(desc->rx_msdu_info.info0) & + rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); - rxcb->mac_id = mac_id; - rxcb->peer_id = le32_get_bits(desc->rx_mpdu_info.peer_meta_data, - RX_MPDU_DESC_META_DATA_PEER_ID); - rxcb->tid = le32_get_bits(desc->rx_mpdu_info.info0, + rxcb->hw_link_id = hw_link_id; + rxcb->peer_id = ath12k_dp_rx_get_peer_id(ab, dp->peer_metadata_ver, + mpdu_info->peer_meta_data); + rxcb->tid = le32_get_bits(mpdu_info->info0, RX_MPDU_DESC_INFO0_TID); __skb_queue_tail(&msdu_list, msdu); @@ -2694,9 +2814,17 @@ try_again: if (!total_msdu_reaped) goto exit; - /* TODO: Move to implicit BM? 
*/ - ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_buffs_reaped, - ab->hw_params->hal_params->rx_buf_rbm, true); + for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) { + if (!num_buffs_reaped[device_id]) + continue; + + partner_ab = ath12k_ag_to_ab(ag, device_id); + rx_ring = &partner_ab->dp.rx_refill_buf_ring; + + ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring, + &rx_desc_used_list[device_id], + num_buffs_reaped[device_id]); + } ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list, ring_id); @@ -2736,10 +2864,17 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev peer = ath12k_peer_find(ab, vdev_id, peer_mac); if (!peer) { spin_unlock_bh(&ab->base_lock); + crypto_free_shash(tfm); ath12k_warn(ab, "failed to find the peer to set up fragment info\n"); return -ENOENT; } + if (!peer->primary_link) { + spin_unlock_bh(&ab->base_lock); + crypto_free_shash(tfm); + return 0; + } + for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { rx_tid = &peer->rx_tid[i]; rx_tid->ab = ab; @@ -2748,6 +2883,7 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev } peer->tfm_mmic = tfm; + peer->dp_setup_done = true; spin_unlock_bh(&ab->base_lock); return 0; @@ -2805,7 +2941,7 @@ static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer u8 mic[IEEE80211_CCMP_MIC_LEN]; int head_len, tail_len, ret; size_t data_len; - u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz; + u32 hdr_len, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; u8 *key, *data; u8 key_idx; @@ -2845,7 +2981,7 @@ mic_fail: ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs); ath12k_dp_rx_h_undecap(ar, msdu, rx_desc, HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true); - ieee80211_rx(ar->hw, msdu); + ieee80211_rx(ath12k_ar_to_hw(ar), msdu); return -EINVAL; } @@ -2855,7 +2991,7 @@ static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu, struct ieee80211_hdr *hdr; size_t hdr_len; size_t crypto_len; - u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz; + u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; if (!flags) return; @@ -2893,7 +3029,7 @@ static int ath12k_dp_rx_h_defrag(struct ath12k *ar, bool is_decrypted = false; int msdu_len = 0; int extra_space; - u32 flags, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz; + u32 flags, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; first_frag = skb_peek(&rx_tid->rx_frags); last_frag = skb_peek_tail(&rx_tid->rx_frags); @@ -2964,12 +3100,13 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, struct hal_srng *srng; dma_addr_t link_paddr, buf_paddr; u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info; - u32 cookie, hal_rx_desc_sz, dest_ring_info0; + u32 cookie, hal_rx_desc_sz, dest_ring_info0, queue_addr_hi; int ret; struct ath12k_rx_desc_info *desc_info; + enum hal_rx_buf_return_buf_manager idle_link_rbm = dp->idle_link_rbm; u8 dst_ind; - hal_rx_desc_sz = ab->hw_params->hal_desc_sz; + hal_rx_desc_sz = ab->hal.hal_desc_sz; link_desc_banks = dp->link_desc_banks; reo_dest_ring = rx_tid->dst_ring_desc; @@ -3000,7 +3137,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, buf_paddr = dma_map_single(ab->dev, defrag_skb->data, defrag_skb->len + skb_tailroom(defrag_skb), - DMA_FROM_DEVICE); + DMA_TO_DEVICE); if (dma_mapping_error(ab->dev, buf_paddr)) return -ENOMEM; @@ -3016,9 +3153,9 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, } desc_info->skb = defrag_skb; + desc_info->in_use = true; list_del(&desc_info->list); - list_add_tail(&desc_info->list, &dp->rx_desc_used_list); 
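
One detail worth calling out in the reinject hunk above: the defragmented skb is handed back to hardware through the REO reinject ring, so it is now mapped DMA_TO_DEVICE rather than DMA_FROM_DEVICE (the CPU produced the buffer and the device consumes it), and the error-path unmap later in this hunk is switched to match, since map and unmap directions must agree. A generic sketch of that pairing, using hypothetical demo_* helpers rather than the driver's code:

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    static int demo_map_for_reinject(struct device *dev, struct sk_buff *skb,
                                     dma_addr_t *paddr)
    {
            /* CPU wrote the frame, hardware will read it: map TO_DEVICE */
            *paddr = dma_map_single(dev, skb->data,
                                    skb->len + skb_tailroom(skb),
                                    DMA_TO_DEVICE);
            if (dma_mapping_error(dev, *paddr))
                    return -ENOMEM;

            return 0;
    }

    static void demo_unmap_for_reinject(struct device *dev, dma_addr_t paddr,
                                        struct sk_buff *skb)
    {
            /* direction must match the corresponding dma_map_single() */
            dma_unmap_single(dev, paddr, skb->len + skb_tailroom(skb),
                             DMA_TO_DEVICE);
    }
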
spin_unlock_bh(&dp->rx_desc_lock); ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr; @@ -3027,7 +3164,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, desc_info->cookie, HAL_RX_BUF_RBM_SW3_BM); - /* Fill mpdu details into reo entrace ring */ + /* Fill mpdu details into reo entrance ring */ srng = &ab->hal.srng_list[dp->reo_reinject_ring.ring_id]; spin_lock_bh(&srng->lock); @@ -3044,7 +3181,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr, cookie, - HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST); + idle_link_rbm); mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) | u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) | @@ -3056,13 +3193,11 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, reo_ent_ring->rx_mpdu_info.peer_meta_data = reo_dest_ring->rx_mpdu_info.peer_meta_data; - /* Firmware expects physical address to be filled in queue_addr_lo in - * the MLO scenario and in case of non MLO peer meta data needs to be - * filled. - * TODO: Need to handle for MLO scenario. - */ - reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data; - reo_ent_ring->info0 = le32_encode_bits(dst_ind, + reo_ent_ring->queue_addr_lo = cpu_to_le32(lower_32_bits(rx_tid->paddr)); + queue_addr_hi = upper_32_bits(rx_tid->paddr); + reo_ent_ring->info0 = le32_encode_bits(queue_addr_hi, + HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) | + le32_encode_bits(dst_ind, HAL_REO_ENTR_RING_INFO0_DEST_IND); reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn, @@ -3080,13 +3215,13 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, err_free_desc: spin_lock_bh(&dp->rx_desc_lock); - list_del(&desc_info->list); - list_add_tail(&desc_info->list, &dp->rx_desc_free_list); + desc_info->in_use = false; desc_info->skb = NULL; + list_add_tail(&desc_info->list, &dp->rx_desc_free_list); spin_unlock_bh(&dp->rx_desc_lock); err_unmap_dma: dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb), - DMA_FROM_DEVICE); + DMA_TO_DEVICE); return ret; } @@ -3123,7 +3258,7 @@ static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb) struct ieee80211_hdr *hdr; u64 pn = 0; u8 *ehdr; - u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz; + u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control); @@ -3214,6 +3349,14 @@ static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar, ret = -ENOENT; goto out_unlock; } + + if (!peer->dp_setup_done) { + ath12k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n", + peer->addr, peer_id); + ret = -ENOENT; + goto out_unlock; + } + rx_tid = &peer->rx_tid[tid]; if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) || @@ -3229,7 +3372,7 @@ static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar, goto out_unlock; } - if (frag_no > __fls(rx_tid->rx_frag_bitmap)) + if ((!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap))) __skb_queue_tail(&rx_tid->rx_frags, msdu); else ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu); @@ -3291,6 +3434,7 @@ out_unlock: static int ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc, + struct list_head *used_list, bool drop, u32 cookie) { struct ath12k_base *ab = ar->ab; @@ -3298,7 +3442,7 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc, struct ath12k_skb_rxcb *rxcb; struct hal_rx_desc 
*rx_desc; u16 msdu_len; - u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz; + u32 hal_rx_desc_sz = ab->hal.hal_desc_sz; struct ath12k_rx_desc_info *desc_info; u64 desc_va; @@ -3310,7 +3454,8 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc, if (!desc_info) { desc_info = ath12k_dp_get_rx_desc(ab, cookie); if (!desc_info) { - ath12k_warn(ab, "Invalid cookie in manual desc retrieval"); + ath12k_warn(ab, "Invalid cookie in DP rx error descriptor retrieval: 0x%x\n", + cookie); return -EINVAL; } } @@ -3320,9 +3465,8 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc, msdu = desc_info->skb; desc_info->skb = NULL; - spin_lock_bh(&ab->dp.rx_desc_lock); - list_move_tail(&desc_info->list, &ab->dp.rx_desc_free_list); - spin_unlock_bh(&ab->dp.rx_desc_lock); + + list_add_tail(&desc_info->list, used_list); rxcb = ATH12K_SKB_RXCB(msdu); dma_unmap_single(ar->ab->dev, rxcb->paddr, @@ -3340,7 +3484,7 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc, goto exit; } - if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) { + if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) { dev_kfree_skb_any(msdu); goto exit; } @@ -3370,7 +3514,10 @@ exit: int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, int budget) { + struct ath12k_hw_group *ag = ab->ag; + struct list_head rx_desc_used_list[ATH12K_MAX_SOCS]; u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; + int num_buffs_reaped[ATH12K_MAX_SOCS] = {}; struct dp_link_desc_bank *link_desc_banks; enum hal_rx_buf_return_buf_manager rbm; struct hal_rx_msdu_link *link_desc_va; @@ -3378,22 +3525,24 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, struct hal_reo_dest_ring *reo_desc; struct dp_rxdma_ring *rx_ring; struct dp_srng *reo_except; + struct ath12k_hw_link *hw_links = ag->hw_links; + struct ath12k_base *partner_ab; + u8 hw_link_id, device_id; u32 desc_bank, num_msdus; struct hal_srng *srng; - struct ath12k_dp *dp; - int mac_id; struct ath12k *ar; dma_addr_t paddr; bool is_frag; - bool drop = false; + bool drop; int pdev_id; tot_n_bufs_reaped = 0; quota = budget; - dp = &ab->dp; - reo_except = &dp->reo_except_ring; - link_desc_banks = dp->link_desc_banks; + for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) + INIT_LIST_HEAD(&rx_desc_used_list[device_id]); + + reo_except = &ab->dp.reo_except_ring; srng = &ab->hal.srng_list[reo_except->ring_id]; @@ -3403,7 +3552,9 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, while (budget && (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { + drop = false; ab->soc_stats.err_ring_pkts++; + ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr, &desc_bank); if (ret) { @@ -3411,16 +3562,27 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, ret); continue; } + + hw_link_id = le32_get_bits(reo_desc->info0, + HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); + device_id = hw_links[hw_link_id].device_id; + partner_ab = ath12k_ag_to_ab(ag, device_id); + + pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params, + hw_links[hw_link_id].pdev_idx); + ar = partner_ab->pdevs[pdev_id].ar; + + link_desc_banks = partner_ab->dp.link_desc_banks; link_desc_va = link_desc_banks[desc_bank].vaddr + (paddr - link_desc_banks[desc_bank].paddr); ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, &rbm); - if (rbm != HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST && + if (rbm != partner_ab->dp.idle_link_rbm && rbm != 
HAL_RX_BUF_RBM_SW3_BM && - rbm != ab->hw_params->hal_params->rx_buf_rbm) { + rbm != partner_ab->hw_params->hal_params->rx_buf_rbm) { ab->soc_stats.invalid_rbm++; ath12k_warn(ab, "invalid return buffer manager %d\n", rbm); - ath12k_dp_rx_link_desc_return(ab, reo_desc, + ath12k_dp_rx_link_desc_return(partner_ab, reo_desc, HAL_WBM_REL_BM_ACT_REL_MSDU); continue; } @@ -3430,24 +3592,26 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, /* Process only rx fragments with one msdu per link desc below, and drop * msdu's indicated due to error reasons. + * Dynamic fragmentation not supported in Multi-link client, so drop the + * partner device buffers. */ - if (!is_frag || num_msdus > 1) { + if (!is_frag || num_msdus > 1 || + partner_ab->device_id != ab->device_id) { drop = true; + /* Return the link desc back to wbm idle list */ - ath12k_dp_rx_link_desc_return(ab, reo_desc, + ath12k_dp_rx_link_desc_return(partner_ab, reo_desc, HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); } for (i = 0; i < num_msdus; i++) { - mac_id = le32_get_bits(reo_desc->info0, - HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); - - pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id); - ar = ab->pdevs[pdev_id].ar; - - if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, drop, - msdu_cookies[i])) + if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, + &rx_desc_used_list[device_id], + drop, + msdu_cookies[i])) { + num_buffs_reaped[device_id]++; tot_n_bufs_reaped++; + } } if (tot_n_bufs_reaped >= quota) { @@ -3463,10 +3627,17 @@ exit: spin_unlock_bh(&srng->lock); - rx_ring = &dp->rx_refill_buf_ring; + for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) { + if (!num_buffs_reaped[device_id]) + continue; + + partner_ab = ath12k_ag_to_ab(ag, device_id); + rx_ring = &partner_ab->dp.rx_refill_buf_ring; - ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, tot_n_bufs_reaped, - ab->hw_params->hal_params->rx_buf_rbm, true); + ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring, + &rx_desc_used_list[device_id], + num_buffs_reaped[device_id]); + } return tot_n_bufs_reaped; } @@ -3480,7 +3651,7 @@ static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar, int n_buffs; n_buffs = DIV_ROUND_UP(msdu_len, - (DP_RX_BUFFER_SIZE - ar->ab->hw_params->hal_desc_sz)); + (DP_RX_BUFFER_SIZE - ar->ab->hal.hal_desc_sz)); skb_queue_walk_safe(msdu_list, skb, tmp) { rxcb = ATH12K_SKB_RXCB(skb); @@ -3500,23 +3671,13 @@ static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu, struct sk_buff_head *msdu_list) { struct ath12k_base *ab = ar->ab; - u16 msdu_len, peer_id; + u16 msdu_len; struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; u8 l3pad_bytes; struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); - u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz; + u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc); - peer_id = ath12k_dp_rx_h_peer_id(ab, desc); - - spin_lock(&ab->base_lock); - if (!ath12k_peer_find_by_id(ab, peer_id)) { - spin_unlock(&ab->base_lock); - ath12k_dbg(ab, ATH12K_DBG_DATA, "invalid peer id received in wbm err pkt%d\n", - peer_id); - return -EINVAL; - } - spin_unlock(&ab->base_lock); if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) { /* First buffer will be freed by the caller, so deduct it's length */ @@ -3611,7 +3772,7 @@ static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu, struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; u8 l3pad_bytes; struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); - u32 
hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz; + u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc); rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc); @@ -3693,26 +3854,33 @@ static void ath12k_dp_rx_wbm_err(struct ath12k *ar, int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab, struct napi_struct *napi, int budget) { + struct list_head rx_desc_used_list[ATH12K_MAX_SOCS]; + struct ath12k_hw_group *ag = ab->ag; struct ath12k *ar; struct ath12k_dp *dp = &ab->dp; struct dp_rxdma_ring *rx_ring; struct hal_rx_wbm_rel_info err_info; struct hal_srng *srng; struct sk_buff *msdu; - struct sk_buff_head msdu_list[MAX_RADIOS]; + struct sk_buff_head msdu_list, scatter_msdu_list; struct ath12k_skb_rxcb *rxcb; void *rx_desc; - int mac_id; - int num_buffs_reaped = 0; + int num_buffs_reaped[ATH12K_MAX_SOCS] = {}; + int total_num_buffs_reaped = 0; struct ath12k_rx_desc_info *desc_info; - int ret, i; + struct ath12k_hw_link *hw_links = ag->hw_links; + struct ath12k_base *partner_ab; + u8 hw_link_id, device_id; + int ret, pdev_id; + struct hal_rx_desc *msdu_data; - for (i = 0; i < ab->num_radios; i++) - __skb_queue_head_init(&msdu_list[i]); + __skb_queue_head_init(&msdu_list); + __skb_queue_head_init(&scatter_msdu_list); - srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id]; - rx_ring = &dp->rx_refill_buf_ring; + for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) + INIT_LIST_HEAD(&rx_desc_used_list[device_id]); + srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id]; spin_lock_bh(&srng->lock); ath12k_hal_srng_access_begin(ab, srng); @@ -3730,38 +3898,45 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab, continue; } - desc_info = (struct ath12k_rx_desc_info *)err_info.rx_desc; + desc_info = err_info.rx_desc; /* retry manual desc retrieval if hw cc is not done */ if (!desc_info) { desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie); if (!desc_info) { - ath12k_warn(ab, "Invalid cookie in manual desc retrieval"); + ath12k_warn(ab, "Invalid cookie in DP WBM rx error descriptor retrieval: 0x%x\n", + err_info.cookie); continue; } } - /* FIXME: Extract mac id correctly. Since descs are not tied - * to mac, we can extract from vdev id in ring desc. 
- */ - mac_id = 0; - if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) ath12k_warn(ab, "WBM RX err, Check HW CC implementation"); msdu = desc_info->skb; desc_info->skb = NULL; - spin_lock_bh(&dp->rx_desc_lock); - list_move_tail(&desc_info->list, &dp->rx_desc_free_list); - spin_unlock_bh(&dp->rx_desc_lock); + device_id = desc_info->device_id; + partner_ab = ath12k_ag_to_ab(ag, device_id); + if (unlikely(!partner_ab)) { + dev_kfree_skb_any(msdu); + + /* In any case continuation bit is set + * in the previous record, cleanup scatter_msdu_list + */ + ath12k_dp_clean_up_skb_list(&scatter_msdu_list); + continue; + } + + list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]); rxcb = ATH12K_SKB_RXCB(msdu); - dma_unmap_single(ab->dev, rxcb->paddr, + dma_unmap_single(partner_ab->dev, rxcb->paddr, msdu->len + skb_tailroom(msdu), DMA_FROM_DEVICE); - num_buffs_reaped++; + num_buffs_reaped[device_id]++; + total_num_buffs_reaped++; if (!err_info.continuation) budget--; @@ -3772,46 +3947,105 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab, continue; } + msdu_data = (struct hal_rx_desc *)msdu->data; rxcb->err_rel_src = err_info.err_rel_src; rxcb->err_code = err_info.err_code; - rxcb->rx_desc = (struct hal_rx_desc *)msdu->data; - __skb_queue_tail(&msdu_list[mac_id], msdu); - rxcb->is_first_msdu = err_info.first_msdu; rxcb->is_last_msdu = err_info.last_msdu; rxcb->is_continuation = err_info.continuation; + rxcb->rx_desc = msdu_data; + + if (err_info.continuation) { + __skb_queue_tail(&scatter_msdu_list, msdu); + continue; + } + + hw_link_id = ath12k_dp_rx_get_msdu_src_link(partner_ab, + msdu_data); + if (hw_link_id >= ATH12K_GROUP_MAX_RADIO) { + dev_kfree_skb_any(msdu); + + /* In any case continuation bit is set + * in the previous record, cleanup scatter_msdu_list + */ + ath12k_dp_clean_up_skb_list(&scatter_msdu_list); + continue; + } + + if (!skb_queue_empty(&scatter_msdu_list)) { + struct sk_buff *msdu; + + skb_queue_walk(&scatter_msdu_list, msdu) { + rxcb = ATH12K_SKB_RXCB(msdu); + rxcb->hw_link_id = hw_link_id; + } + + skb_queue_splice_tail_init(&scatter_msdu_list, + &msdu_list); + } + + rxcb = ATH12K_SKB_RXCB(msdu); + rxcb->hw_link_id = hw_link_id; + __skb_queue_tail(&msdu_list, msdu); } + /* In any case continuation bit is set in the + * last record, cleanup scatter_msdu_list + */ + ath12k_dp_clean_up_skb_list(&scatter_msdu_list); + ath12k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); - if (!num_buffs_reaped) + if (!total_num_buffs_reaped) goto done; - ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_buffs_reaped, - ab->hw_params->hal_params->rx_buf_rbm, true); + for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) { + if (!num_buffs_reaped[device_id]) + continue; + + partner_ab = ath12k_ag_to_ab(ag, device_id); + rx_ring = &partner_ab->dp.rx_refill_buf_ring; + + ath12k_dp_rx_bufs_replenish(ab, rx_ring, + &rx_desc_used_list[device_id], + num_buffs_reaped[device_id]); + } rcu_read_lock(); - for (i = 0; i < ab->num_radios; i++) { - if (!rcu_dereference(ab->pdevs_active[i])) { - __skb_queue_purge(&msdu_list[i]); + while ((msdu = __skb_dequeue(&msdu_list))) { + rxcb = ATH12K_SKB_RXCB(msdu); + hw_link_id = rxcb->hw_link_id; + + device_id = hw_links[hw_link_id].device_id; + partner_ab = ath12k_ag_to_ab(ag, device_id); + if (unlikely(!partner_ab)) { + ath12k_dbg(ab, ATH12K_DBG_DATA, + "Unable to process WBM error msdu due to invalid hw link id %d device id %d\n", + hw_link_id, device_id); + dev_kfree_skb_any(msdu); continue; } - ar = ab->pdevs[i].ar; + pdev_id = 
ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params, + hw_links[hw_link_id].pdev_idx); + ar = partner_ab->pdevs[pdev_id].ar; - if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) { - __skb_queue_purge(&msdu_list[i]); + if (!ar || !rcu_dereference(ar->ab->pdevs_active[hw_link_id])) { + dev_kfree_skb_any(msdu); continue; } - while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL) - ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]); + if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) { + dev_kfree_skb_any(msdu); + continue; + } + ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list); } rcu_read_unlock(); done: - return num_buffs_reaped; + return total_num_buffs_reaped; } void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab) @@ -3833,7 +4067,7 @@ void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab) ath12k_hal_srng_access_begin(ab, srng); while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { - tag = u64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG); + tag = le64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG); switch (tag) { case HAL_REO_GET_QUEUE_STATS_STATUS: @@ -3900,7 +4134,7 @@ void ath12k_dp_rx_free(struct ath12k_base *ab) ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring); - for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { if (ab->hw_params->rx_mac_buf_ring) ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]); } @@ -3909,7 +4143,6 @@ void ath12k_dp_rx_free(struct ath12k_base *ab) ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]); ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring); - ath12k_dp_srng_cleanup(ab, &dp->tx_mon_buf_ring.refill_buf_ring); ath12k_dp_rxdma_buf_free(ab); } @@ -3927,7 +4160,7 @@ int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab) struct htt_rx_ring_tlv_filter tlv_filter = {0}; u32 ring_id; int ret; - u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz; + u32 hal_rx_desc_sz = ab->hal.hal_desc_sz; ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; @@ -3940,14 +4173,20 @@ int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab) tlv_filter.rx_packet_offset = hal_rx_desc_sz; tlv_filter.rx_mpdu_start_offset = - ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset(); + ab->hal_rx_ops->rx_desc_get_mpdu_start_offset(); tlv_filter.rx_msdu_end_offset = - ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset(); + ab->hal_rx_ops->rx_desc_get_msdu_end_offset(); + + if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) { + tlv_filter.rx_mpdu_start_wmask = + ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start(); + tlv_filter.rx_msdu_end_wmask = + ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end(); + ath12k_dbg(ab, ATH12K_DBG_DATA, + "Configuring compact tlv masks rx_mpdu_start_wmask 0x%x rx_msdu_end_wmask 0x%x\n", + tlv_filter.rx_mpdu_start_wmask, tlv_filter.rx_msdu_end_wmask); + } - /* TODO: Selectively subscribe to required qwords within msdu_end - * and mpdu_start and setup the mask in below msg - * and modify the rx_desc struct - */ ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0, HAL_RXDMA_BUF, DP_RXDMA_REFILL_RING_SIZE, @@ -3961,8 +4200,8 @@ int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab) struct ath12k_dp *dp = &ab->dp; struct htt_rx_ring_tlv_filter tlv_filter = {0}; u32 ring_id; - int ret; - u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz; + int ret = 0; + u32 hal_rx_desc_sz = ab->hal.hal_desc_sz; int i; ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; @@ -3978,16 +4217,16 @@ int 
ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab) tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv); tlv_filter.rx_mpdu_start_offset = - ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset(); + ab->hal_rx_ops->rx_desc_get_mpdu_start_offset(); tlv_filter.rx_msdu_end_offset = - ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset(); + ab->hal_rx_ops->rx_desc_get_msdu_end_offset(); /* TODO: Selectively subscribe to required qwords within msdu_end * and mpdu_start and setup the mask in below msg * and modify the rx_desc struct */ - for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { ring_id = dp->rx_mac_buf_ring[i].ring_id; ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i, HAL_RXDMA_BUF, @@ -4014,7 +4253,7 @@ int ath12k_dp_rx_htt_setup(struct ath12k_base *ab) } if (ab->hw_params->rx_mac_buf_ring) { - for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { ring_id = dp->rx_mac_buf_ring[i].ring_id; ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, i, HAL_RXDMA_BUF); @@ -4046,15 +4285,6 @@ int ath12k_dp_rx_htt_setup(struct ath12k_base *ab) ret); return ret; } - - ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id; - ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, - 0, HAL_TX_MONITOR_BUF); - if (ret) { - ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n", - ret); - return ret; - } } ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab); @@ -4071,15 +4301,9 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab) struct ath12k_dp *dp = &ab->dp; int i, ret; - idr_init(&dp->rx_refill_buf_ring.bufs_idr); - spin_lock_init(&dp->rx_refill_buf_ring.idr_lock); - idr_init(&dp->rxdma_mon_buf_ring.bufs_idr); spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock); - idr_init(&dp->tx_mon_buf_ring.bufs_idr); - spin_lock_init(&dp->tx_mon_buf_ring.idr_lock); - ret = ath12k_dp_srng_setup(ab, &dp->rx_refill_buf_ring.refill_buf_ring, HAL_RXDMA_BUF, 0, 0, @@ -4090,11 +4314,11 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab) } if (ab->hw_params->rx_mac_buf_ring) { - for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { ret = ath12k_dp_srng_setup(ab, &dp->rx_mac_buf_ring[i], HAL_RXDMA_BUF, 1, - i, 1024); + i, DP_RX_MAC_BUF_RING_SIZE); if (ret) { ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n", i); @@ -4122,15 +4346,6 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab) ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n"); return ret; } - - ret = ath12k_dp_srng_setup(ab, - &dp->tx_mon_buf_ring.refill_buf_ring, - HAL_TX_MONITOR_BUF, 0, 0, - DP_TX_MONITOR_BUF_RING_SIZE); - if (ret) { - ath12k_warn(ab, "failed to setup DP_TX_MONITOR_BUF_RING_SIZE\n"); - return ret; - } } ret = ath12k_dp_rxdma_buf_setup(ab); @@ -4159,7 +4374,7 @@ int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id) return ret; } - for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { ring_id = dp->rxdma_mon_dst_ring[i].ring_id; ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i, @@ -4170,17 +4385,6 @@ int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id) i, ret); return ret; } - - ring_id = dp->tx_mon_dst_ring[i].ring_id; - ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, - mac_id + i, - HAL_TX_MONITOR_DST); - if (ret) { - ath12k_warn(ab, - "failed to configure tx_mon_dst_ring %d %d\n", - i, ret); - return ret; - } } out: 
return 0; @@ -4224,29 +4428,3 @@ int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar) return 0; } - -int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab) -{ - /* start reap timer */ - mod_timer(&ab->mon_reap_timer, - jiffies + msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL)); - - return 0; -} - -int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer) -{ - int ret; - - if (stop_timer) - del_timer_sync(&ab->mon_reap_timer); - - /* reap all the monitor related rings */ - ret = ath12k_dp_purge_mon_ring(ab); - if (ret) { - ath12k_warn(ab, "failed to purge dp mon ring: %d\n", ret); - return ret; - } - - return 0; -} @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH12K_DP_RX_H #define ATH12K_DP_RX_H @@ -85,10 +85,12 @@ static inline u32 ath12k_he_gi_to_nl80211_he_gi(u8 sgi) } int ath12k_dp_rx_ampdu_start(struct ath12k *ar, - struct ieee80211_ampdu_params *params); + struct ieee80211_ampdu_params *params, + u8 link_id); int ath12k_dp_rx_ampdu_stop(struct ath12k *ar, - struct ieee80211_ampdu_params *params); -int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif, + struct ieee80211_ampdu_params *params, + u8 link_id); +int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif, const u8 *peer_addr, enum set_key_cmd key_cmd, struct ieee80211_key_conf *key); @@ -116,16 +118,13 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, int ath12k_dp_rx_process(struct ath12k_base *ab, int mac_id, struct napi_struct *napi, int budget); -int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, int mac_id, +int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, struct dp_rxdma_ring *rx_ring, - int req_entries, - enum hal_rx_buf_return_buf_manager mgr, - bool hw_cc); + struct list_head *used_list, + int req_entries); int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar); int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id); -int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab); -int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer); u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab, struct hal_rx_desc *desc); struct ath12k_peer * @@ -142,4 +141,8 @@ ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu); int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab); int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab); +int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len, + int (*iter)(struct ath12k_base *ar, u16 tag, u16 len, + const void *ptr, void *data), + void *data); #endif /* ATH12K_DP_RX_H */ @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include "core.h" @@ -10,7 +10,7 @@ #include "hw.h" static enum hal_tcl_encap_type -ath12k_dp_tx_get_encap_type(struct ath12k_vif *arvif, struct sk_buff *skb) +ath12k_dp_tx_get_encap_type(struct ath12k_link_vif *arvif, struct sk_buff *skb) { struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ath12k_base *ab = arvif->ar->ab; @@ -106,11 +106,10 @@ static struct ath12k_tx_desc_info *ath12k_dp_tx_assign_buffer(struct ath12k_dp * return desc; } -static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab, void *cmd, +static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab, + struct hal_tx_msdu_ext_desc *tcl_ext_cmd, struct hal_tx_info *ti) { - struct hal_tx_msdu_ext_desc *tcl_ext_cmd = (struct hal_tx_msdu_ext_desc *)cmd; - tcl_ext_cmd->info0 = le32_encode_bits(ti->paddr, HAL_TX_MSDU_EXT_INFO0_BUF_PTR_LO); tcl_ext_cmd->info1 = le32_encode_bits(0x0, @@ -125,7 +124,99 @@ static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab, void *cmd, HAL_TX_MSDU_EXT_INFO1_ENCRYPT_TYPE); } -int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif, +#define HTT_META_DATA_ALIGNMENT 0x8 + +static void *ath12k_dp_metadata_align_skb(struct sk_buff *skb, u8 tail_len) +{ + struct sk_buff *tail; + void *metadata; + + if (unlikely(skb_cow_data(skb, tail_len, &tail) < 0)) + return NULL; + + metadata = pskb_put(skb, tail, tail_len); + memset(metadata, 0, tail_len); + return metadata; +} + +/* Preparing HTT Metadata when utilized with ext MSDU */ +static int ath12k_dp_prepare_htt_metadata(struct sk_buff *skb) +{ + struct hal_tx_msdu_metadata *desc_ext; + u8 htt_desc_size; + /* Size rounded of multiple of 8 bytes */ + u8 htt_desc_size_aligned; + + htt_desc_size = sizeof(struct hal_tx_msdu_metadata); + htt_desc_size_aligned = ALIGN(htt_desc_size, HTT_META_DATA_ALIGNMENT); + + desc_ext = ath12k_dp_metadata_align_skb(skb, htt_desc_size_aligned); + if (!desc_ext) + return -ENOMEM; + + desc_ext->info0 = le32_encode_bits(1, HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_FLAG) | + le32_encode_bits(0, HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_TYPE) | + le32_encode_bits(1, + HAL_TX_MSDU_METADATA_INFO0_HOST_TX_DESC_POOL); + + return 0; +} + +static void ath12k_dp_tx_move_payload(struct sk_buff *skb, + unsigned long delta, + bool head) +{ + unsigned long len = skb->len; + + if (head) { + skb_push(skb, delta); + memmove(skb->data, skb->data + delta, len); + skb_trim(skb, len); + } else { + skb_put(skb, delta); + memmove(skb->data + delta, skb->data, len); + skb_pull(skb, delta); + } +} + +static int ath12k_dp_tx_align_payload(struct ath12k_base *ab, + struct sk_buff **pskb) +{ + u32 iova_mask = ab->hw_params->iova_mask; + unsigned long offset, delta1, delta2; + struct sk_buff *skb2, *skb = *pskb; + unsigned int headroom = skb_headroom(skb); + int tailroom = skb_tailroom(skb); + int ret = 0; + + offset = (unsigned long)skb->data & iova_mask; + delta1 = offset; + delta2 = iova_mask - offset + 1; + + if (headroom >= delta1) { + ath12k_dp_tx_move_payload(skb, delta1, true); + } else if (tailroom >= delta2) { + ath12k_dp_tx_move_payload(skb, delta2, false); + } else { + skb2 = skb_realloc_headroom(skb, iova_mask); + if (!skb2) { + ret = -ENOMEM; + goto out; + } + + dev_kfree_skb_any(skb); + + offset = (unsigned long)skb2->data & iova_mask; + if (offset) + ath12k_dp_tx_move_payload(skb2, offset, true); + *pskb = skb2; + } + +out: + return ret; +} + +int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif, struct sk_buff *skb) { struct ath12k_base *ab = ar->ab; @@ -139,6 +230,7 @@ int 
ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif, struct sk_buff *skb_ext_desc; struct hal_srng *tcl_ring; struct ieee80211_hdr *hdr = (void *)skb->data; + struct ath12k_vif *ahvif = arvif->ahvif; struct dp_tx_ring *tx_ring; u8 pool_id; u8 hal_ring_id; @@ -146,13 +238,15 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif, u8 ring_selector, ring_map = 0; bool tcl_ring_retry; bool msdu_ext_desc = false; + bool add_htt_metadata = false; + u32 iova_mask = ab->hw_params->iova_mask; if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) return -ESHUTDOWN; if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) && !ieee80211_is_data(hdr->frame_control)) - return -ENOTSUPP; + return -EOPNOTSUPP; pool_id = skb_get_queue_mapping(skb) & (ATH12K_HW_MAX_QUEUES - 1); @@ -181,7 +275,7 @@ tcl_ring_sel: ti.bank_id = arvif->bank_id; ti.meta_data_flags = arvif->tcl_metadata; - if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW && + if (ahvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW && test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) { if (skb_cb->flags & ATH12K_SKB_CIPHER_SET) { ti.encrypt_type = @@ -241,6 +335,23 @@ tcl_ring_sel: goto fail_remove_tx_buf; } + if (iova_mask && + (unsigned long)skb->data & iova_mask) { + ret = ath12k_dp_tx_align_payload(ab, &skb); + if (ret) { + ath12k_warn(ab, "failed to align TX buffer %d\n", ret); + /* don't bail out, give original buffer + * a chance even unaligned. + */ + goto map; + } + + /* hdr is pointing to a wrong place after alignment, + * so refresh it for later use. + */ + hdr = (void *)skb->data; + } +map: ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE); if (dma_mapping_error(ab->dev, ti.paddr)) { atomic_inc(&ab->soc_stats.tx_err.misc_fail); @@ -249,12 +360,24 @@ tcl_ring_sel: goto fail_remove_tx_buf; } + if (!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) && + !(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) && + !(skb_cb->flags & ATH12K_SKB_CIPHER_SET) && + ieee80211_has_protected(hdr->frame_control)) { + /* Add metadata for sw encrypted vlan group traffic */ + add_htt_metadata = true; + msdu_ext_desc = true; + ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TO_FW); + ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW; + ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN; + } + tx_desc->skb = skb; tx_desc->mac_id = ar->pdev_idx; ti.desc_id = tx_desc->desc_id; ti.data_len = skb->len; skb_cb->paddr = ti.paddr; - skb_cb->vif = arvif->vif; + skb_cb->vif = ahvif->vif; skb_cb->ar = ar; if (msdu_ext_desc) { @@ -270,6 +393,15 @@ tcl_ring_sel: msg = (struct hal_tx_msdu_ext_desc *)skb_ext_desc->data; ath12k_hal_tx_cmd_ext_desc_setup(ab, msg, &ti); + if (add_htt_metadata) { + ret = ath12k_dp_prepare_htt_metadata(skb_ext_desc); + if (ret < 0) { + ath12k_dbg(ab, ATH12K_DBG_DP_TX, + "Failed to add HTT meta data, dropping packet\n"); + goto fail_unmap_dma; + } + } + ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data, skb_ext_desc->len, DMA_TO_DEVICE); ret = dma_mapping_error(ab->dev, ti.paddr); @@ -301,7 +433,7 @@ tcl_ring_sel: spin_unlock_bh(&tcl_ring->lock); ret = -ENOMEM; - /* Checking for available tcl descritors in another ring in + /* Checking for available tcl descriptors in another ring in * case of failure due to full tcl ring now, is better than * checking this ring earlier for each pkt tx. * Restart ring selection if some rings are not checked yet. 
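
On the payload-alignment logic above: iova_mask is assumed to be a power-of-two alignment minus one (e.g. 0xfff for 4 KiB), so offset = (unsigned long)skb->data & iova_mask is the current misalignment. Moving the payload down by offset (consuming headroom) or up by iova_mask - offset + 1 (consuming tailroom) both leave skb->data on an aligned boundary, which is why ath12k_dp_tx_align_payload() tries whichever room is available before falling back to skb_realloc_headroom(). A self-contained sketch of just that arithmetic, with demo_* names as placeholders:

    #include <linux/types.h>

    /* Only meaningful when addr is misaligned, i.e. (addr & iova_mask) != 0,
     * which is what the tx path checks before attempting to realign.
     */
    static unsigned long demo_headroom_delta(unsigned long addr,
                                             unsigned long iova_mask)
    {
            /* e.g. addr 0x1234, mask 0xfff -> shift down by 0x234 */
            return addr & iova_mask;
    }

    static unsigned long demo_tailroom_delta(unsigned long addr,
                                             unsigned long iova_mask)
    {
            /* e.g. addr 0x1234, mask 0xfff -> shift up by 0xdcc to 0x2000 */
            return iova_mask - (addr & iova_mask) + 1;
    }
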
@@ -330,8 +462,11 @@ tcl_ring_sel: fail_unmap_dma: dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE); - dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc, - sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE); + + if (skb_cb->paddr_ext_desc) + dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc, + sizeof(struct hal_tx_msdu_ext_desc), + DMA_TO_DEVICE); fail_remove_tx_buf: ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id); @@ -350,15 +485,15 @@ static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab, u8 pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id); skb_cb = ATH12K_SKB_CB(msdu); + ar = ab->pdevs[pdev_id].ar; dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); if (skb_cb->paddr_ext_desc) dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc, sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE); - dev_kfree_skb_any(msdu); + ieee80211_free_txskb(ar->ah->hw, msdu); - ar = ab->pdevs[pdev_id].ar; if (atomic_dec_and_test(&ar->dp.num_tx_pending)) wake_up(&ar->dp.tx_empty_waitq); } @@ -391,15 +526,19 @@ ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab, if (ts->acked) { if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { info->flags |= IEEE80211_TX_STAT_ACK; - info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR + - ts->ack_rssi; + info->status.ack_signal = ts->ack_rssi; + + if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT, + ab->wmi_ab.svc_map)) + info->status.ack_signal += ATH12K_DEFAULT_NOISE_FLOOR; + info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID; } else { info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; } } - ieee80211_tx_status(ar->hw, msdu); + ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu); } static void @@ -412,7 +551,7 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab, struct ath12k_dp_htt_wbm_tx_status ts = {0}; enum hal_wbm_htt_tx_comp_status wbm_status; - status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET; + status_desc = desc; wbm_status = le32_get_bits(status_desc->info0, HTT_TX_WBM_COMP_INFO0_STATUS); @@ -446,6 +585,7 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar, struct hal_tx_status *ts) { struct ath12k_base *ab = ar->ab; + struct ath12k_hw *ah = ar->ah; struct ieee80211_tx_info *info; struct ath12k_skb_cb *skb_cb; @@ -464,12 +604,12 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar, rcu_read_lock(); if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) { - dev_kfree_skb_any(msdu); + ieee80211_free_txskb(ah->hw, msdu); goto exit; } if (!skb_cb->vif) { - dev_kfree_skb_any(msdu); + ieee80211_free_txskb(ah->hw, msdu); goto exit; } @@ -479,24 +619,46 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar, /* skip tx rate update from ieee80211_status*/ info->status.rates[0].idx = -1; - if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED && - !(info->flags & IEEE80211_TX_CTL_NO_ACK)) { - info->flags |= IEEE80211_TX_STAT_ACK; - info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR + - ts->ack_rssi; - info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID; - } + switch (ts->status) { + case HAL_WBM_TQM_REL_REASON_FRAME_ACKED: + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { + info->flags |= IEEE80211_TX_STAT_ACK; + info->status.ack_signal = ts->ack_rssi; - if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX && - (info->flags & IEEE80211_TX_CTL_NO_ACK)) - info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; + if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT, + ab->wmi_ab.svc_map)) + info->status.ack_signal += ATH12K_DEFAULT_NOISE_FLOOR; + + info->status.flags = 
IEEE80211_TX_STATUS_ACK_SIGNAL_VALID; + } + break; + case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX: + if (info->flags & IEEE80211_TX_CTL_NO_ACK) { + info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; + break; + } + fallthrough; + case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU: + case HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD: + case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES: + /* The failure status is due to internal firmware tx failure + * hence drop the frame; do not update the status of frame to + * the upper layer + */ + ieee80211_free_txskb(ah->hw, msdu); + goto exit; + default: + ath12k_dbg(ab, ATH12K_DBG_DP_TX, "tx frame is not acked status %d\n", + ts->status); + break; + } /* NOTE: Tx rate status reporting. Tx completion status does not have * necessary information (for example nss) to build the tx rate. * Might end up reporting it out-of-band from HTT stats. */ - ieee80211_tx_status(ar->hw, msdu); + ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu); exit: rcu_read_unlock(); @@ -667,14 +829,6 @@ ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab, *htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING; *htt_ring_type = HTT_SW_TO_HW_RING; break; - case HAL_TX_MONITOR_BUF: - *htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING; - *htt_ring_type = HTT_SW_TO_HW_RING; - break; - case HAL_TX_MONITOR_DST: - *htt_ring_id = HTT_TX_MON_MON2HOST_DEST_RING; - *htt_ring_type = HTT_HW_TO_SW_RING; - break; default: ath12k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type); ret = -EINVAL; @@ -835,7 +989,7 @@ int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab) if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) { ath12k_err(ab, "unsupported htt major version %d supported version is %d\n", dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR); - return -ENOTSUPP; + return -EOPNOTSUPP; } return 0; @@ -852,7 +1006,7 @@ int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask) int ret; int i; - for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { skb = ath12k_htc_alloc_skb(ab, len); if (!skb) return -ENOMEM; @@ -962,6 +1116,26 @@ int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id, HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET); } + if (tlv_filter->rx_mpdu_start_wmask > 0 && + tlv_filter->rx_msdu_end_wmask > 0) { + cmd->info2 |= + le32_encode_bits(true, + HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET); + cmd->rx_mpdu_start_end_mask = + le32_encode_bits(tlv_filter->rx_mpdu_start_wmask, + HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK); + /* mpdu_end is not used for any hardwares so far + * please assign it in future if any chip is + * using through hal ops + */ + cmd->rx_mpdu_start_end_mask |= + le32_encode_bits(tlv_filter->rx_mpdu_end_wmask, + HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK); + cmd->rx_msdu_end_word_mask = + le32_encode_bits(tlv_filter->rx_msdu_end_wmask, + HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK); + } + ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb); if (ret) goto err_free; @@ -985,6 +1159,7 @@ ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type, struct htt_ext_stats_cfg_cmd *cmd; int len = sizeof(*cmd); int ret; + u32 pdev_id; skb = ath12k_htc_alloc_skb(ab, len); if (!skb) @@ -996,7 +1171,8 @@ ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type, memset(cmd, 0, sizeof(*cmd)); cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG; - cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id; + pdev_id = ath12k_mac_get_target_pdev_id(ar); + cmd->hdr.pdev_mask = 1 << pdev_id; cmd->hdr.stats_type = 
type; cmd->cfg_param0 = cpu_to_le32(cfg_params->cfg0); @@ -1022,13 +1198,7 @@ int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset) struct ath12k_base *ab = ar->ab; int ret; - ret = ath12k_dp_tx_htt_tx_monitor_mode_ring_config(ar, reset); - if (ret) { - ath12k_err(ab, "failed to setup tx monitor filter %d\n", ret); - return ret; - } - - ret = ath12k_dp_tx_htt_tx_monitor_mode_ring_config(ar, reset); + ret = ath12k_dp_tx_htt_rx_monitor_mode_ring_config(ar, reset); if (ret) { ath12k_err(ab, "failed to setup rx monitor filter %d\n", ret); return ret; @@ -1187,31 +1357,3 @@ err_free: dev_kfree_skb_any(skb); return ret; } - -int ath12k_dp_tx_htt_tx_monitor_mode_ring_config(struct ath12k *ar, bool reset) -{ - struct ath12k_base *ab = ar->ab; - struct ath12k_dp *dp = &ab->dp; - struct htt_tx_ring_tlv_filter tlv_filter = {0}; - int ret, ring_id; - - ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id; - - /* TODO: Need to set upstream/downstream tlv filters - * here - */ - - if (ab->hw_params->rxdma1_enable) { - ret = ath12k_dp_tx_htt_tx_filter_setup(ar->ab, ring_id, 0, - HAL_TX_MONITOR_BUF, - DP_RXDMA_REFILL_RING_SIZE, - &tlv_filter); - if (ret) { - ath12k_err(ab, - "failed to setup filter for monitor buf %d\n", ret); - return ret; - } - } - - return 0; -} @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH12K_DP_TX_H @@ -12,11 +12,11 @@ struct ath12k_dp_htt_wbm_tx_status { bool acked; - int ack_rssi; + s8 ack_rssi; }; int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab); -int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif, +int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif, struct sk_buff *skb); void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id); @@ -36,6 +36,5 @@ int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id, int mac_id, enum hal_ring_type ring_type, int tx_buf_size, struct htt_tx_ring_tlv_filter *htt_tlv_filter); -int ath12k_dp_tx_htt_tx_monitor_mode_ring_config(struct ath12k *ar, bool reset); int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset); #endif @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "core.h" + +#include "debug.h" + +static int ath12k_fw_request_firmware_api_n(struct ath12k_base *ab, + const char *name) +{ + size_t magic_len, len, ie_len; + int ie_id, i, index, bit, ret; + struct ath12k_fw_ie *hdr; + const u8 *data; + __le32 *timestamp; + + ab->fw.fw = ath12k_core_firmware_request(ab, name); + if (IS_ERR(ab->fw.fw)) { + ret = PTR_ERR(ab->fw.fw); + ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to load %s: %d\n", name, ret); + ab->fw.fw = NULL; + return ret; + } + + data = ab->fw.fw->data; + len = ab->fw.fw->size; + + /* magic also includes the null byte, check that as well */ + magic_len = strlen(ATH12K_FIRMWARE_MAGIC) + 1; + + if (len < magic_len) { + ath12k_err(ab, "firmware image too small to contain magic: %zu\n", + len); + ret = -EINVAL; + goto err; + } + + if (memcmp(data, ATH12K_FIRMWARE_MAGIC, magic_len) != 0) { + ath12k_err(ab, "Invalid firmware magic\n"); + ret = -EINVAL; + goto err; + } + + /* jump over the padding */ + magic_len = ALIGN(magic_len, 4); + + /* make sure there's space for padding */ + if (magic_len > len) { + ath12k_err(ab, "No space for padding after magic\n"); + ret = -EINVAL; + goto err; + } + + len -= magic_len; + data += magic_len; + + /* loop elements */ + while (len > sizeof(struct ath12k_fw_ie)) { + hdr = (struct ath12k_fw_ie *)data; + + ie_id = le32_to_cpu(hdr->id); + ie_len = le32_to_cpu(hdr->len); + + len -= sizeof(*hdr); + data += sizeof(*hdr); + + if (len < ie_len) { + ath12k_err(ab, "Invalid length for FW IE %d (%zu < %zu)\n", + ie_id, len, ie_len); + ret = -EINVAL; + goto err; + } + + switch (ie_id) { + case ATH12K_FW_IE_TIMESTAMP: + if (ie_len != sizeof(u32)) + break; + + timestamp = (__le32 *)data; + + ath12k_dbg(ab, ATH12K_DBG_BOOT, "found fw timestamp %d\n", + le32_to_cpup(timestamp)); + break; + case ATH12K_FW_IE_FEATURES: + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "found firmware features ie (%zd B)\n", + ie_len); + + for (i = 0; i < ATH12K_FW_FEATURE_COUNT; i++) { + index = i / 8; + bit = i % 8; + + if (index == ie_len) + break; + + if (data[index] & (1 << bit)) + __set_bit(i, ab->fw.fw_features); + } + + ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "features", "", + ab->fw.fw_features, + sizeof(ab->fw.fw_features)); + break; + case ATH12K_FW_IE_AMSS_IMAGE: + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "found fw image ie (%zd B)\n", + ie_len); + + ab->fw.amss_data = data; + ab->fw.amss_len = ie_len; + break; + case ATH12K_FW_IE_M3_IMAGE: + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "found m3 image ie (%zd B)\n", + ie_len); + + ab->fw.m3_data = data; + ab->fw.m3_len = ie_len; + break; + case ATH12K_FW_IE_AMSS_DUALMAC_IMAGE: + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "found dualmac fw image ie (%zd B)\n", + ie_len); + ab->fw.amss_dualmac_data = data; + ab->fw.amss_dualmac_len = ie_len; + break; + default: + ath12k_warn(ab, "Unknown FW IE: %u\n", ie_id); + break; + } + + /* jump over the padding */ + ie_len = ALIGN(ie_len, 4); + + /* make sure there's space for padding */ + if (ie_len > len) + break; + + len -= ie_len; + data += ie_len; + } + + return 0; + +err: + release_firmware(ab->fw.fw); + ab->fw.fw = NULL; + return ret; +} + +void ath12k_fw_map(struct ath12k_base *ab) +{ + int ret; + + ret = ath12k_fw_request_firmware_api_n(ab, ATH12K_FW_API2_FILE); + if (ret == 0) + ab->fw.api_version = 2; + else + ab->fw.api_version = 1; + + ath12k_dbg(ab, ATH12K_DBG_BOOT, "using fw api %d\n", + ab->fw.api_version); +} + +void ath12k_fw_unmap(struct ath12k_base *ab) +{ + release_firmware(ab->fw.fw); + memset(&ab->fw, 0, sizeof(ab->fw)); +} @@ -0,0 +1,36 @@ +/* 
SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef ATH12K_FW_H +#define ATH12K_FW_H + +#define ATH12K_FW_API2_FILE "firmware-2.bin" +#define ATH12K_FIRMWARE_MAGIC "QCOM-ATH12K-FW" + +enum ath12k_fw_ie_type { + ATH12K_FW_IE_TIMESTAMP = 0, + ATH12K_FW_IE_FEATURES = 1, + ATH12K_FW_IE_AMSS_IMAGE = 2, + ATH12K_FW_IE_M3_IMAGE = 3, + ATH12K_FW_IE_AMSS_DUALMAC_IMAGE = 4, +}; + +enum ath12k_fw_features { + /* The firmware supports setting the QRTR id via register + * PCIE_LOCAL_REG_QRTR_NODE_ID + */ + ATH12K_FW_FEATURE_MULTI_QRTR_ID = 0, + + /* The firmware supports MLO capability */ + ATH12K_FW_FEATURE_MLO, + + /* keep last */ + ATH12K_FW_FEATURE_COUNT, +}; + +void ath12k_fw_map(struct ath12k_base *ab); +void ath12k_fw_unmap(struct ath12k_base *ab); + +#endif /* ATH12K_FW_H */ @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <linux/dma-mapping.h> #include "hal_tx.h" @@ -181,7 +181,7 @@ static const struct hal_srng_config hw_srng_config_template[] = { .max_size = HAL_WBM2PPE_RELEASE_RING_BASE_MSB_RING_SIZE, }, [HAL_TX_MONITOR_BUF] = { - .start_ring_id = HAL_SRNG_SW2TXMON_BUF0, + .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0, .max_rings = 1, .entry_size = sizeof(struct hal_mon_buf_ring) >> 2, .mac_type = ATH12K_HAL_SRNG_PMAC, @@ -449,8 +449,8 @@ static u8 *ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc) static bool ath12k_hw_qcn9274_rx_desc_is_da_mcbc(struct hal_rx_desc *desc) { - return __le16_to_cpu(desc->u.qcn9274.msdu_end.info5) & - RX_MSDU_END_INFO5_DA_IS_MCBC; + return __le32_to_cpu(desc->u.qcn9274.mpdu_start.info6) & + RX_MPDU_START_INFO6_MCAST_BCAST; } static void ath12k_hw_qcn9274_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc, @@ -626,6 +626,21 @@ static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab) return 0; } +static u16 ath12k_hal_qcn9274_rx_mpdu_start_wmask_get(void) +{ + return QCN9274_MPDU_START_WMASK; +} + +static u32 ath12k_hal_qcn9274_rx_msdu_end_wmask_get(void) +{ + return QCN9274_MSDU_END_WMASK; +} + +static const struct hal_rx_ops *ath12k_hal_qcn9274_get_hal_rx_compact_ops(void) +{ + return &hal_rx_qcn9274_compact_ops; +} + static bool ath12k_hw_qcn9274_dp_rx_h_msdu_done(struct hal_rx_desc *desc) { return !!le32_get_bits(desc->u.qcn9274.msdu_end.info14, @@ -680,7 +695,17 @@ static u32 ath12k_hw_qcn9274_dp_rx_h_mpdu_err(struct hal_rx_desc *desc) return errmap; } -const struct hal_ops hal_qcn9274_ops = { +static u32 ath12k_hw_qcn9274_get_rx_desc_size(void) +{ + return sizeof(struct hal_rx_desc_qcn9274); +} + +static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc) +{ + return 0; +} + +const struct hal_rx_ops hal_rx_qcn9274_ops = { .rx_desc_get_first_msdu = ath12k_hw_qcn9274_rx_desc_get_first_msdu, .rx_desc_get_last_msdu = ath12k_hw_qcn9274_rx_desc_get_last_msdu, .rx_desc_get_l3_pad_bytes = ath12k_hw_qcn9274_rx_desc_get_l3_pad_bytes, @@ -712,13 +737,367 @@ const struct hal_ops hal_qcn9274_ops = { .rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_rx_desc_get_dot11_hdr, .rx_desc_get_crypto_header = ath12k_hw_qcn9274_rx_desc_get_crypto_hdr, .rx_desc_get_mpdu_frame_ctl = ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl, - .create_srng_config = 
ath12k_hal_srng_create_config_qcn9274, - .tcl_to_wbm_rbm_map = ath12k_hal_qcn9274_tcl_to_wbm_rbm_map, .dp_rx_h_msdu_done = ath12k_hw_qcn9274_dp_rx_h_msdu_done, .dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_l4_cksum_fail, .dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail, .dp_rx_h_is_decrypted = ath12k_hw_qcn9274_dp_rx_h_is_decrypted, .dp_rx_h_mpdu_err = ath12k_hw_qcn9274_dp_rx_h_mpdu_err, + .rx_desc_get_desc_size = ath12k_hw_qcn9274_get_rx_desc_size, + .rx_desc_get_msdu_src_link_id = ath12k_hw_qcn9274_rx_desc_get_msdu_src_link, +}; + +static bool ath12k_hw_qcn9274_compact_rx_desc_get_first_msdu(struct hal_rx_desc *desc) +{ + return !!le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5, + RX_MSDU_END_INFO5_FIRST_MSDU); +} + +static bool ath12k_hw_qcn9274_compact_rx_desc_get_last_msdu(struct hal_rx_desc *desc) +{ + return !!le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5, + RX_MSDU_END_INFO5_LAST_MSDU); +} + +static u8 ath12k_hw_qcn9274_compact_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc) +{ + return le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5, + RX_MSDU_END_INFO5_L3_HDR_PADDING); +} + +static bool ath12k_hw_qcn9274_compact_rx_desc_encrypt_valid(struct hal_rx_desc *desc) +{ + return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4, + RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID); +} + +static u32 ath12k_hw_qcn9274_compact_rx_desc_get_encrypt_type(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info2, + RX_MPDU_START_INFO2_ENC_TYPE); +} + +static u8 ath12k_hw_qcn9274_compact_rx_desc_get_decap_type(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info11, + RX_MSDU_END_INFO11_DECAP_FORMAT); +} + +static u8 ath12k_hw_qcn9274_compact_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.qcn9274.msdu_end.info11, + RX_MSDU_END_INFO11_MESH_CTRL_PRESENT); +} + +static bool +ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc) +{ + return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4, + RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID); +} + +static bool ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc) +{ + return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4, + RX_MPDU_START_INFO4_MPDU_FCTRL_VALID); +} + +static u16 +ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4, + RX_MPDU_START_INFO4_MPDU_SEQ_NUM); +} + +static u16 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_len(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info10, + RX_MSDU_END_INFO10_MSDU_LENGTH); +} + +static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12, + RX_MSDU_END_INFO12_SGI); +} + +static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12, + RX_MSDU_END_INFO12_RATE_MCS); +} + +static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12, + RX_MSDU_END_INFO12_RECV_BW); +} + +static u32 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_freq(struct hal_rx_desc *desc) +{ + return __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.phy_meta_data); +} + +static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_pkt_type(struct 
hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12, + RX_MSDU_END_INFO12_PKT_TYPE); +} + +static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_nss(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12, + RX_MSDU_END_INFO12_MIMO_SS_BITMAP); +} + +static u8 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc) +{ + return le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5, + RX_MSDU_END_INFO5_TID); +} + +static u16 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc) +{ + return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.sw_peer_id); +} + +static void ath12k_hw_qcn9274_compact_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc, + struct hal_rx_desc *ldesc) +{ + fdesc->u.qcn9274_compact.msdu_end = ldesc->u.qcn9274_compact.msdu_end; +} + +static u32 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc) +{ + return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.phy_ppdu_id); +} + +static void +ath12k_hw_qcn9274_compact_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len) +{ + u32 info = __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.info10); + + info = u32_replace_bits(info, len, RX_MSDU_END_INFO10_MSDU_LENGTH); + desc->u.qcn9274_compact.msdu_end.info10 = __cpu_to_le32(info); +} + +static u8 *ath12k_hw_qcn9274_compact_rx_desc_get_msdu_payload(struct hal_rx_desc *desc) +{ + return &desc->u.qcn9274_compact.msdu_payload[0]; +} + +static u32 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_offset(void) +{ + return offsetof(struct hal_rx_desc_qcn9274_compact, mpdu_start); +} + +static u32 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_end_offset(void) +{ + return offsetof(struct hal_rx_desc_qcn9274_compact, msdu_end); +} + +static bool ath12k_hw_qcn9274_compact_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc) +{ + return __le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info4) & + RX_MPDU_START_INFO4_MAC_ADDR2_VALID; +} + +static u8 *ath12k_hw_qcn9274_compact_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc) +{ + return desc->u.qcn9274_compact.mpdu_start.addr2; +} + +static bool ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc(struct hal_rx_desc *desc) +{ + return __le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info6) & + RX_MPDU_START_INFO6_MCAST_BCAST; +} + +static void ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc, + struct ieee80211_hdr *hdr) +{ + hdr->frame_control = desc->u.qcn9274_compact.mpdu_start.frame_ctrl; + hdr->duration_id = desc->u.qcn9274_compact.mpdu_start.duration; + ether_addr_copy(hdr->addr1, desc->u.qcn9274_compact.mpdu_start.addr1); + ether_addr_copy(hdr->addr2, desc->u.qcn9274_compact.mpdu_start.addr2); + ether_addr_copy(hdr->addr3, desc->u.qcn9274_compact.mpdu_start.addr3); + if (__le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info4) & + RX_MPDU_START_INFO4_MAC_ADDR4_VALID) { + ether_addr_copy(hdr->addr4, desc->u.qcn9274_compact.mpdu_start.addr4); + } + hdr->seq_ctrl = desc->u.qcn9274_compact.mpdu_start.seq_ctrl; +} + +static void +ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc, + u8 *crypto_hdr, + enum hal_encrypt_type enctype) +{ + unsigned int key_id; + + switch (enctype) { + case HAL_ENCRYPT_TYPE_OPEN: + return; + case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: + case HAL_ENCRYPT_TYPE_TKIP_MIC: + crypto_hdr[0] = + HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[0]); + crypto_hdr[1] = 0; + crypto_hdr[2] = + 
HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[0]); + break; + case HAL_ENCRYPT_TYPE_CCMP_128: + case HAL_ENCRYPT_TYPE_CCMP_256: + case HAL_ENCRYPT_TYPE_GCMP_128: + case HAL_ENCRYPT_TYPE_AES_GCMP_256: + crypto_hdr[0] = + HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[0]); + crypto_hdr[1] = + HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[0]); + crypto_hdr[2] = 0; + break; + case HAL_ENCRYPT_TYPE_WEP_40: + case HAL_ENCRYPT_TYPE_WEP_104: + case HAL_ENCRYPT_TYPE_WEP_128: + case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: + case HAL_ENCRYPT_TYPE_WAPI: + return; + } + key_id = le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info5, + RX_MPDU_START_INFO5_KEY_ID); + crypto_hdr[3] = 0x20 | (key_id << 6); + crypto_hdr[4] = + HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.qcn9274_compact.mpdu_start.pn[0]); + crypto_hdr[5] = + HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.qcn9274_compact.mpdu_start.pn[0]); + crypto_hdr[6] = + HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[1]); + crypto_hdr[7] = + HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[1]); +} + +static u16 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc) +{ + return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.frame_ctrl); +} + +static bool ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done(struct hal_rx_desc *desc) +{ + return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14, + RX_MSDU_END_INFO14_MSDU_DONE); +} + +static bool ath12k_hw_qcn9274_compact_dp_rx_h_l4_cksum_fail(struct hal_rx_desc *desc) +{ + return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info13, + RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL); +} + +static bool ath12k_hw_qcn9274_compact_dp_rx_h_ip_cksum_fail(struct hal_rx_desc *desc) +{ + return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info13, + RX_MSDU_END_INFO13_IP_CKSUM_FAIL); +} + +static bool ath12k_hw_qcn9274_compact_dp_rx_h_is_decrypted(struct hal_rx_desc *desc) +{ + return (le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14, + RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) == + RX_DESC_DECRYPT_STATUS_CODE_OK); +} + +static u32 ath12k_hw_qcn9274_compact_dp_rx_h_mpdu_err(struct hal_rx_desc *desc) +{ + u32 info = __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.info13); + u32 errmap = 0; + + if (info & RX_MSDU_END_INFO13_FCS_ERR) + errmap |= HAL_RX_MPDU_ERR_FCS; + + if (info & RX_MSDU_END_INFO13_DECRYPT_ERR) + errmap |= HAL_RX_MPDU_ERR_DECRYPT; + + if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR) + errmap |= HAL_RX_MPDU_ERR_TKIP_MIC; + + if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR) + errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR; + + if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR) + errmap |= HAL_RX_MPDU_ERR_OVERFLOW; + + if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR) + errmap |= HAL_RX_MPDU_ERR_MSDU_LEN; + + if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR) + errmap |= HAL_RX_MPDU_ERR_MPDU_LEN; + + return errmap; +} + +static u32 ath12k_hw_qcn9274_compact_get_rx_desc_size(void) +{ + return sizeof(struct hal_rx_desc_qcn9274_compact); +} + +static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc) +{ + return le64_get_bits(desc->u.qcn9274_compact.msdu_end.msdu_end_tag, + RX_MSDU_END_64_TLV_SRC_LINK_ID); +} + +const struct hal_rx_ops hal_rx_qcn9274_compact_ops = { + .rx_desc_get_first_msdu = ath12k_hw_qcn9274_compact_rx_desc_get_first_msdu, + .rx_desc_get_last_msdu = ath12k_hw_qcn9274_compact_rx_desc_get_last_msdu, + .rx_desc_get_l3_pad_bytes = ath12k_hw_qcn9274_compact_rx_desc_get_l3_pad_bytes, + 
.rx_desc_encrypt_valid = ath12k_hw_qcn9274_compact_rx_desc_encrypt_valid, + .rx_desc_get_encrypt_type = ath12k_hw_qcn9274_compact_rx_desc_get_encrypt_type, + .rx_desc_get_decap_type = ath12k_hw_qcn9274_compact_rx_desc_get_decap_type, + .rx_desc_get_mesh_ctl = ath12k_hw_qcn9274_compact_rx_desc_get_mesh_ctl, + .rx_desc_get_mpdu_seq_ctl_vld = + ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_seq_ctl_vld, + .rx_desc_get_mpdu_fc_valid = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_fc_valid, + .rx_desc_get_mpdu_start_seq_no = + ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_seq_no, + .rx_desc_get_msdu_len = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_len, + .rx_desc_get_msdu_sgi = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_sgi, + .rx_desc_get_msdu_rate_mcs = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rate_mcs, + .rx_desc_get_msdu_rx_bw = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rx_bw, + .rx_desc_get_msdu_freq = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_freq, + .rx_desc_get_msdu_pkt_type = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_pkt_type, + .rx_desc_get_msdu_nss = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_nss, + .rx_desc_get_mpdu_tid = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_tid, + .rx_desc_get_mpdu_peer_id = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_peer_id, + .rx_desc_copy_end_tlv = ath12k_hw_qcn9274_compact_rx_desc_copy_end_tlv, + .rx_desc_get_mpdu_ppdu_id = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_ppdu_id, + .rx_desc_set_msdu_len = ath12k_hw_qcn9274_compact_rx_desc_set_msdu_len, + .rx_desc_get_msdu_payload = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_payload, + .rx_desc_get_mpdu_start_offset = + ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_offset, + .rx_desc_get_msdu_end_offset = + ath12k_hw_qcn9274_compact_rx_desc_get_msdu_end_offset, + .rx_desc_mac_addr2_valid = ath12k_hw_qcn9274_compact_rx_desc_mac_addr2_valid, + .rx_desc_mpdu_start_addr2 = ath12k_hw_qcn9274_compact_rx_desc_mpdu_start_addr2, + .rx_desc_is_da_mcbc = ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc, + .rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr, + .rx_desc_get_crypto_header = ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr, + .rx_desc_get_mpdu_frame_ctl = + ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_frame_ctl, + .dp_rx_h_msdu_done = ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done, + .dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_l4_cksum_fail, + .dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_ip_cksum_fail, + .dp_rx_h_is_decrypted = ath12k_hw_qcn9274_compact_dp_rx_h_is_decrypted, + .dp_rx_h_mpdu_err = ath12k_hw_qcn9274_compact_dp_rx_h_mpdu_err, + .rx_desc_get_desc_size = ath12k_hw_qcn9274_compact_get_rx_desc_size, + .rx_desc_get_msdu_src_link_id = + ath12k_hw_qcn9274_compact_rx_desc_get_msdu_src_link, +}; + +const struct hal_ops hal_qcn9274_ops = { + .create_srng_config = ath12k_hal_srng_create_config_qcn9274, + .tcl_to_wbm_rbm_map = ath12k_hal_qcn9274_tcl_to_wbm_rbm_map, + .rxdma_ring_wmask_rx_mpdu_start = ath12k_hal_qcn9274_rx_mpdu_start_wmask_get, + .rxdma_ring_wmask_rx_msdu_end = ath12k_hal_qcn9274_rx_msdu_end_wmask_get, + .get_hal_rx_compact_ops = ath12k_hal_qcn9274_get_hal_rx_compact_ops, }; static bool ath12k_hw_wcn7850_rx_desc_get_first_msdu(struct hal_rx_desc *desc) @@ -824,8 +1203,8 @@ static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_nss(struct hal_rx_desc *desc) static u8 ath12k_hw_wcn7850_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc) { - return le16_get_bits(desc->u.wcn7850.msdu_end.info5, - RX_MSDU_END_INFO5_TID); + return 
le32_get_bits(desc->u.wcn7850.mpdu_start.info2, + RX_MPDU_START_INFO2_TID); } static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc) @@ -889,8 +1268,8 @@ static u8 *ath12k_hw_wcn7850_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc) static bool ath12k_hw_wcn7850_rx_desc_is_da_mcbc(struct hal_rx_desc *desc) { - return __le16_to_cpu(desc->u.wcn7850.msdu_end.info5) & - RX_MSDU_END_INFO5_DA_IS_MCBC; + return __le32_to_cpu(desc->u.wcn7850.msdu_end.info13) & + RX_MSDU_END_INFO13_MCAST_BCAST; } static void ath12k_hw_wcn7850_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc, @@ -1134,7 +1513,17 @@ static u32 ath12k_hw_wcn7850_dp_rx_h_mpdu_err(struct hal_rx_desc *desc) return errmap; } -const struct hal_ops hal_wcn7850_ops = { +static u32 ath12k_hw_wcn7850_get_rx_desc_size(void) +{ + return sizeof(struct hal_rx_desc_wcn7850); +} + +static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc) +{ + return 0; +} + +const struct hal_rx_ops hal_rx_wcn7850_ops = { .rx_desc_get_first_msdu = ath12k_hw_wcn7850_rx_desc_get_first_msdu, .rx_desc_get_last_msdu = ath12k_hw_wcn7850_rx_desc_get_last_msdu, .rx_desc_get_l3_pad_bytes = ath12k_hw_wcn7850_rx_desc_get_l3_pad_bytes, @@ -1167,13 +1556,21 @@ const struct hal_ops hal_wcn7850_ops = { .rx_desc_get_dot11_hdr = ath12k_hw_wcn7850_rx_desc_get_dot11_hdr, .rx_desc_get_crypto_header = ath12k_hw_wcn7850_rx_desc_get_crypto_hdr, .rx_desc_get_mpdu_frame_ctl = ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl, - .create_srng_config = ath12k_hal_srng_create_config_wcn7850, - .tcl_to_wbm_rbm_map = ath12k_hal_wcn7850_tcl_to_wbm_rbm_map, .dp_rx_h_msdu_done = ath12k_hw_wcn7850_dp_rx_h_msdu_done, .dp_rx_h_l4_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_l4_cksum_fail, .dp_rx_h_ip_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_ip_cksum_fail, .dp_rx_h_is_decrypted = ath12k_hw_wcn7850_dp_rx_h_is_decrypted, .dp_rx_h_mpdu_err = ath12k_hw_wcn7850_dp_rx_h_mpdu_err, + .rx_desc_get_desc_size = ath12k_hw_wcn7850_get_rx_desc_size, + .rx_desc_get_msdu_src_link_id = ath12k_hw_wcn7850_rx_desc_get_msdu_src_link, +}; + +const struct hal_ops hal_wcn7850_ops = { + .create_srng_config = ath12k_hal_srng_create_config_wcn7850, + .tcl_to_wbm_rbm_map = ath12k_hal_wcn7850_tcl_to_wbm_rbm_map, + .rxdma_ring_wmask_rx_mpdu_start = NULL, + .rxdma_ring_wmask_rx_msdu_end = NULL, + .get_hal_rx_compact_ops = NULL, }; static int ath12k_hal_alloc_cont_rdp(struct ath12k_base *ab) @@ -1572,14 +1969,15 @@ u32 ath12k_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc } void ath12k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie, - dma_addr_t paddr) + dma_addr_t paddr, + enum hal_rx_buf_return_buf_manager rbm) { desc->buf_addr_info.info0 = le32_encode_bits((paddr & HAL_ADDR_LSB_REG_MASK), BUFFER_ADDR_INFO0_ADDR); desc->buf_addr_info.info1 = le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT), BUFFER_ADDR_INFO1_ADDR) | - le32_encode_bits(1, BUFFER_ADDR_INFO1_RET_BUF_MGR) | + le32_encode_bits(rbm, BUFFER_ADDR_INFO1_RET_BUF_MGR) | le32_encode_bits(cookie, BUFFER_ADDR_INFO1_SW_COOKIE); } @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
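As an aside on the ath12k_hal_set_link_desc_addr() hunk above: the return buffer manager (rbm) is now supplied by the caller instead of being hard-coded to 1. A minimal userspace sketch of that packing, assuming the hal_desc.h field widths (GENMASK(7, 0), GENMASK(11, 8), GENMASK(31, 12)) and reducing the le32 helpers to plain shifts; it is illustrative only, not driver code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t paddr  = 0x123456789abcull; /* example DMA address */
	uint32_t cookie = 42;
	uint32_t rbm    = 1; /* e.g. HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST */

	/* info0: paddr bits 31:0 (BUFFER_ADDR_INFO0_ADDR) */
	uint32_t info0 = (uint32_t)(paddr & 0xffffffffull);

	/* info1: paddr bits 39:32, then the return-buffer manager, then the
	 * SW cookie; the field widths here are assumptions transcribed from
	 * hal_desc.h, not authoritative.
	 */
	uint32_t info1 = ((uint32_t)(paddr >> 32) & 0xff) |  /* GENMASK(7, 0)   */
			 ((rbm & 0xf) << 8) |                /* GENMASK(11, 8)  */
			 ((cookie & 0xfffff) << 12);         /* GENMASK(31, 12) */

	/* Before this patch rbm was always 1; now the caller chooses it. */
	printf("info0=0x%08x info1=0x%08x\n", info0, info1);
	return 0;
}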
*/ #ifndef ATH12K_HAL_H @@ -485,8 +485,8 @@ enum hal_srng_ring_id { HAL_SRNG_RING_ID_WMAC1_RXMON2SW0 = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1, HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC, HAL_SRNG_RING_ID_RXDMA_DIR_BUF, - HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0, HAL_SRNG_RING_ID_WMAC1_TXMON2SW0_BUF0, + HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0, HAL_SRNG_RING_ID_PMAC1_ID_END, }; @@ -767,15 +767,15 @@ struct hal_srng_config { }; /** - * enum hal_rx_buf_return_buf_manager + * enum hal_rx_buf_return_buf_manager - manager for returned rx buffers * * @HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list - * @HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST: Descriptor returned to WBM idle - * descriptor list, where the chip 0 WBM is chosen in case of a multi-chip config - * @HAL_RX_BUF_RBM_WBM_CHIP1_IDLE_DESC_LIST: Descriptor returned to WBM idle - * descriptor list, where the chip 1 WBM is chosen in case of a multi-chip config - * @HAL_RX_BUF_RBM_WBM_CHIP2_IDLE_DESC_LIST: Descriptor returned to WBM idle - * descriptor list, where the chip 2 WBM is chosen in case of a multi-chip config + * @HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST: Descriptor returned to WBM idle + * descriptor list, where the device 0 WBM is chosen in case of a multi-device config + * @HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST: Descriptor returned to WBM idle + * descriptor list, where the device 1 WBM is chosen in case of a multi-device config + * @HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST: Descriptor returned to WBM idle + * descriptor list, where the device 2 WBM is chosen in case of a multi-device config * @HAL_RX_BUF_RBM_FW_BM: Buffer returned to FW * @HAL_RX_BUF_RBM_SW0_BM: For ring 0 -- returned to host * @HAL_RX_BUF_RBM_SW1_BM: For ring 1 -- returned to host @@ -788,9 +788,9 @@ struct hal_srng_config { enum hal_rx_buf_return_buf_manager { HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST, - HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST, - HAL_RX_BUF_RBM_WBM_CHIP1_IDLE_DESC_LIST, - HAL_RX_BUF_RBM_WBM_CHIP2_IDLE_DESC_LIST, + HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST, + HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST, + HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST, HAL_RX_BUF_RBM_FW_BM, HAL_RX_BUF_RBM_SW0_BM, HAL_RX_BUF_RBM_SW1_BM, @@ -1023,6 +1023,8 @@ struct ath12k_hal { /* shadow register configuration */ u32 shadow_reg_addr[HAL_SHADOW_NUM_REGS]; int num_shadow_reg_configured; + + u32 hal_desc_sz; }; /* Maps WBM ring number and Return Buffer Manager Id per TCL ring */ @@ -1031,7 +1033,7 @@ struct ath12k_hal_tcl_to_wbm_rbm_map { u8 rbm_id; }; -struct hal_ops { +struct hal_rx_ops { bool (*rx_desc_get_first_msdu)(struct hal_rx_desc *desc); bool (*rx_desc_get_last_msdu)(struct hal_rx_desc *desc); u8 (*rx_desc_get_l3_pad_bytes)(struct hal_rx_desc *desc); @@ -1070,18 +1072,30 @@ struct hal_ops { void (*rx_desc_get_crypto_header)(struct hal_rx_desc *desc, u8 *crypto_hdr, enum hal_encrypt_type enctype); - int (*create_srng_config)(struct ath12k_base *ab); bool (*dp_rx_h_msdu_done)(struct hal_rx_desc *desc); bool (*dp_rx_h_l4_cksum_fail)(struct hal_rx_desc *desc); bool (*dp_rx_h_ip_cksum_fail)(struct hal_rx_desc *desc); bool (*dp_rx_h_is_decrypted)(struct hal_rx_desc *desc); u32 (*dp_rx_h_mpdu_err)(struct hal_rx_desc *desc); + u32 (*rx_desc_get_desc_size)(void); + u8 (*rx_desc_get_msdu_src_link_id)(struct hal_rx_desc *desc); +}; + +struct hal_ops { + int (*create_srng_config)(struct ath12k_base *ab); + u16 (*rxdma_ring_wmask_rx_mpdu_start)(void); + u32 (*rxdma_ring_wmask_rx_msdu_end)(void); + const struct hal_rx_ops *(*get_hal_rx_compact_ops)(void); const struct 
ath12k_hal_tcl_to_wbm_rbm_map *tcl_to_wbm_rbm_map; }; extern const struct hal_ops hal_qcn9274_ops; extern const struct hal_ops hal_wcn7850_ops; +extern const struct hal_rx_ops hal_rx_qcn9274_ops; +extern const struct hal_rx_ops hal_rx_qcn9274_compact_ops; +extern const struct hal_rx_ops hal_rx_wcn7850_ops; + u32 ath12k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid); void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc, int tid, u32 ba_window_size, @@ -1099,7 +1113,8 @@ dma_addr_t ath12k_hal_srng_get_tp_addr(struct ath12k_base *ab, dma_addr_t ath12k_hal_srng_get_hp_addr(struct ath12k_base *ab, struct hal_srng *srng); void ath12k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie, - dma_addr_t paddr); + dma_addr_t paddr, + enum hal_rx_buf_return_buf_manager rbm); u32 ath12k_hal_ce_get_desc_size(enum hal_ce_desc type); void ath12k_hal_ce_src_set_desc(struct hal_ce_srng_src_desc *desc, dma_addr_t paddr, u32 len, u32 id, u8 byte_swap_data); diff --git a/hal_desc.h b/hal_desc.h index 6c17adc6d60b..7b0403d245e5 100644 --- a/hal_desc.h +++ b/hal_desc.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include "core.h" @@ -522,7 +522,7 @@ enum hal_tlv_tag { HAL_PHYRXHT_SIG_USR_SU = 468 /* 0x1d4 */, HAL_PHYRXHT_SIG_USR_MU_MIMO = 469 /* 0x1d5 */, HAL_PHYRX_GENERIC_U_SIG = 470 /* 0x1d6 */, - HAL_PHYRX_GENERICHT_SIG = 471 /* 0x1d7 */, + HAL_PHYRX_GENERIC_EHT_SIG = 471 /* 0x1d7 */, HAL_OVERWRITE_RESP_START = 472 /* 0x1d8 */, HAL_OVERWRITE_RESP_PREAMBLE_INFO = 473 /* 0x1d9 */, HAL_OVERWRITE_RESP_FRAME_INFO = 474 /* 0x1da */, @@ -579,9 +579,11 @@ struct hal_tlv_hdr { #define HAL_TLV_64_HDR_TAG GENMASK(9, 1) #define HAL_TLV_64_HDR_LEN GENMASK(21, 10) +#define HAL_TLV_64_USR_ID GENMASK(31, 26) +#define HAL_TLV_64_ALIGN 8 struct hal_tlv_64_hdr { - u64 tl; + __le64 tl; u8 value[]; } __packed; @@ -597,8 +599,30 @@ struct hal_tlv_64_hdr { #define RX_MPDU_DESC_INFO0_MPDU_QOS_CTRL_VALID BIT(27) #define RX_MPDU_DESC_INFO0_TID GENMASK(31, 28) -/* TODO revisit after meta data is concluded */ -#define RX_MPDU_DESC_META_DATA_PEER_ID GENMASK(15, 0) +/* Peer Metadata classification */ + +/* Version 0 */ +#define RX_MPDU_DESC_META_DATA_V0_PEER_ID GENMASK(15, 0) +#define RX_MPDU_DESC_META_DATA_V0_VDEV_ID GENMASK(23, 16) + +/* Version 1 */ +#define RX_MPDU_DESC_META_DATA_V1_PEER_ID GENMASK(13, 0) +#define RX_MPDU_DESC_META_DATA_V1_LOGICAL_LINK_ID GENMASK(15, 14) +#define RX_MPDU_DESC_META_DATA_V1_VDEV_ID GENMASK(23, 16) +#define RX_MPDU_DESC_META_DATA_V1_LMAC_ID GENMASK(25, 24) +#define RX_MPDU_DESC_META_DATA_V1_DEVICE_ID GENMASK(28, 26) + +/* Version 1A */ +#define RX_MPDU_DESC_META_DATA_V1A_PEER_ID GENMASK(13, 0) +#define RX_MPDU_DESC_META_DATA_V1A_VDEV_ID GENMASK(21, 14) +#define RX_MPDU_DESC_META_DATA_V1A_LOGICAL_LINK_ID GENMASK(25, 22) +#define RX_MPDU_DESC_META_DATA_V1A_DEVICE_ID GENMASK(28, 26) + +/* Version 1B */ +#define RX_MPDU_DESC_META_DATA_V1B_PEER_ID GENMASK(13, 0) +#define RX_MPDU_DESC_META_DATA_V1B_VDEV_ID GENMASK(21, 14) +#define RX_MPDU_DESC_META_DATA_V1B_HW_LINK_ID GENMASK(25, 22) +#define RX_MPDU_DESC_META_DATA_V1B_DEVICE_ID GENMASK(28, 26) struct rx_mpdu_desc { __le32 info0; /* %RX_MPDU_DESC_INFO */ @@ -2048,6 +2072,19 @@ struct hal_wbm_release_ring { * fw with fw_reason2. 
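For the peer-metadata classification words added above in hal_desc.h, a standalone sketch of how a version-1 word would decode. The example value is arbitrary, and plain shifts stand in for le32_get_bits()/GENMASK(); verify the layouts against the defines themselves.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t meta = 0x04c1234a; /* arbitrary example value */

	unsigned peer_id =  meta        & 0x3fff; /* V1_PEER_ID  GENMASK(13, 0)  */
	unsigned link_id = (meta >> 14) & 0x3;    /* V1_LOGICAL_LINK_ID          */
	unsigned vdev_id = (meta >> 16) & 0xff;   /* V1_VDEV_ID  GENMASK(23, 16) */
	unsigned lmac_id = (meta >> 24) & 0x3;    /* V1_LMAC_ID                  */
	unsigned dev_id  = (meta >> 26) & 0x7;    /* V1_DEVICE_ID GENMASK(28, 26) */

	printf("peer %u link %u vdev %u lmac %u device %u\n",
	       peer_id, link_id, vdev_id, lmac_id, dev_id);
	return 0;
}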
* @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3: Remove command initiated by * fw with fw_reason3. + * @HAL_WBM_TQM_REL_REASON_CMD_DISABLE_QUEUE: Remove command initiated by + * fw with disable queue. + * @HAL_WBM_TQM_REL_REASON_CMD_TILL_NONMATCHING: Remove command initiated by + * fw to remove all MPDUs until the first non-match. + * @HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD: Dropped due to drop threshold + * criteria + * @HAL_WBM_TQM_REL_REASON_DROP_LINK_DESC_UNAVAIL: Dropped due to link desc + * not available + * @HAL_WBM_TQM_REL_REASON_DROP_OR_INVALID_MSDU: Dropped due to drop bit set or + * null flow + * @HAL_WBM_TQM_REL_REASON_MULTICAST_DROP: Dropped due to mcast drop set for VDEV + * @HAL_WBM_TQM_REL_REASON_VDEV_MISMATCH_DROP: Dropped due to being set with + * 'TCL_drop_reason' */ enum hal_wbm_tqm_rel_reason { HAL_WBM_TQM_REL_REASON_FRAME_ACKED, @@ -2058,6 +2095,13 @@ enum hal_wbm_tqm_rel_reason { HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1, HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2, HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3, + HAL_WBM_TQM_REL_REASON_CMD_DISABLE_QUEUE, + HAL_WBM_TQM_REL_REASON_CMD_TILL_NONMATCHING, + HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD, + HAL_WBM_TQM_REL_REASON_DROP_LINK_DESC_UNAVAIL, + HAL_WBM_TQM_REL_REASON_DROP_OR_INVALID_MSDU, + HAL_WBM_TQM_REL_REASON_MULTICAST_DROP, + HAL_WBM_TQM_REL_REASON_VDEV_MISMATCH_DROP, }; struct hal_wbm_buffer_ring { @@ -2500,13 +2544,13 @@ struct hal_rx_reo_queue { #define HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE BIT(30) #define HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG BIT(31) -#define HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE GENMASK(7, 0) -#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE GENMASK(9, 8) -#define HAL_REO_UPD_RX_QUEUE_INFO2_SVLD BIT(10) -#define HAL_REO_UPD_RX_QUEUE_INFO2_SSN GENMASK(22, 11) -#define HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR BIT(23) -#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR BIT(24) -#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_VALID BIT(25) +#define HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE GENMASK(9, 0) +#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE GENMASK(11, 10) +#define HAL_REO_UPD_RX_QUEUE_INFO2_SVLD BIT(12) +#define HAL_REO_UPD_RX_QUEUE_INFO2_SSN GENMASK(24, 13) +#define HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR BIT(25) +#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR BIT(26) +#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_VALID BIT(27) struct hal_reo_update_rx_queue { struct hal_reo_cmd_hdr cmd; __le32 queue_addr_lo; __le32 info0; __le32 info1; __le32 info2; __le32 pn[4]; } __packed; @@ -2517,6 +2561,12 @@ struct hal_reo_update_rx_queue { +struct hal_rx_reo_queue_1k { + struct hal_desc_header desc_hdr; + __le32 rx_bitmap_1023_288[23]; + __le32 reserved[8]; +} __packed; + #define HAL_REO_UNBLOCK_CACHE_INFO0_UNBLK_CACHE BIT(0) #define HAL_REO_UNBLOCK_CACHE_INFO0_RESOURCE_IDX GENMASK(2, 1) @@ -2958,4 +3008,29 @@ struct hal_mon_dest_desc { * updated by SRNG. */ +#define HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_FLAG BIT(8) +#define HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_TYPE GENMASK(16, 15) +#define HAL_TX_MSDU_METADATA_INFO0_HOST_TX_DESC_POOL BIT(31) + +struct hal_tx_msdu_metadata { + __le32 info0; + __le32 rsvd0[6]; +} __packed; + +/* hal_tx_msdu_metadata * valid_encrypt_type * if set, encrypt type is valid * encrypt_type * 0 = NO_ENCRYPT, * 1 = ENCRYPT, * 2 ~ 3 - Reserved * host_tx_desc_pool * If set, firmware allocates tx_descriptors * in WAL_BUFFERID_TX_HOST_DATA_EXP, instead * of WAL_BUFFERID_TX_TCL_DATA_EXP. * Use cases: * Any time firmware uses TQM-BYPASS for Data * TID, firmware expects the host to set this bit.
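To make the new TX MSDU metadata layout concrete, a small sketch that packs info0 from the three fields defined just above. This is not driver code, only the bit arithmetic spelled out, with BIT()/GENMASK() replaced by explicit shifts.

#include <stdint.h>
#include <stdio.h>

#define INFO0_ENCRYPT_FLAG      (1u << 8)              /* BIT(8)         */
#define INFO0_ENCRYPT_TYPE(x)   (((x) & 0x3u) << 15)   /* GENMASK(16, 15) */
#define INFO0_HOST_TX_DESC_POOL (1u << 31)             /* BIT(31)        */

int main(void)
{
	/* 0 = NO_ENCRYPT, 1 = ENCRYPT, per the comment block above */
	uint32_t info0 = INFO0_ENCRYPT_FLAG |
			 INFO0_ENCRYPT_TYPE(1) |
			 INFO0_HOST_TX_DESC_POOL; /* fw allocates tx descriptors */

	printf("tx msdu metadata info0 = 0x%08x\n", info0);
	return 0;
}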
+ */ + #endif /* ATH12K_HAL_DESC_H */ @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include "debug.h" @@ -26,8 +26,8 @@ static int ath12k_hal_reo_cmd_queue_stats(struct hal_tlv_64_hdr *tlv, { struct hal_reo_get_queue_stats *desc; - tlv->tl = u32_encode_bits(HAL_REO_GET_QUEUE_STATS, HAL_TLV_HDR_TAG) | - u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN); + tlv->tl = le64_encode_bits(HAL_REO_GET_QUEUE_STATS, HAL_TLV_HDR_TAG) | + le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN); desc = (struct hal_reo_get_queue_stats *)tlv->value; memset_startat(desc, 0, queue_addr_lo); @@ -59,8 +59,8 @@ static int ath12k_hal_reo_cmd_flush_cache(struct ath12k_hal *hal, hal->current_blk_index = avail_slot; } - tlv->tl = u32_encode_bits(HAL_REO_FLUSH_CACHE, HAL_TLV_HDR_TAG) | - u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN); + tlv->tl = le64_encode_bits(HAL_REO_FLUSH_CACHE, HAL_TLV_HDR_TAG) | + le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN); desc = (struct hal_reo_flush_cache *)tlv->value; memset_startat(desc, 0, cache_addr_lo); @@ -97,8 +97,8 @@ static int ath12k_hal_reo_cmd_update_rx_queue(struct hal_tlv_64_hdr *tlv, { struct hal_reo_update_rx_queue *desc; - tlv->tl = u32_encode_bits(HAL_REO_UPDATE_RX_REO_QUEUE, HAL_TLV_HDR_TAG) | - u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN); + tlv->tl = le64_encode_bits(HAL_REO_UPDATE_RX_REO_QUEUE, HAL_TLV_HDR_TAG) | + le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN); desc = (struct hal_reo_update_rx_queue *)tlv->value; memset_startat(desc, 0, queue_addr_lo); @@ -247,7 +247,7 @@ int ath12k_hal_reo_cmd_send(struct ath12k_base *ab, struct hal_srng *srng, case HAL_REO_CMD_UNBLOCK_CACHE: case HAL_REO_CMD_FLUSH_TIMEOUT_LIST: ath12k_warn(ab, "Unsupported reo command %d\n", type); - ret = -ENOTSUPP; + ret = -EOPNOTSUPP; break; default: ath12k_warn(ab, "Unknown reo command %d\n", type); @@ -688,23 +688,28 @@ void ath12k_hal_reo_update_rx_reo_queue_status(struct ath12k_base *ab, u32 ath12k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid) { - u32 num_ext_desc; + u32 num_ext_desc, num_1k_desc = 0; if (ba_window_size <= 1) { if (tid != HAL_DESC_REO_NON_QOS_TID) num_ext_desc = 1; else num_ext_desc = 0; + } else if (ba_window_size <= 105) { num_ext_desc = 1; } else if (ba_window_size <= 210) { num_ext_desc = 2; - } else { + } else if (ba_window_size <= 256) { num_ext_desc = 3; + } else { + num_ext_desc = 10; + num_1k_desc = 1; } return sizeof(struct hal_rx_reo_queue) + - (num_ext_desc * sizeof(struct hal_rx_reo_queue_ext)); + (num_ext_desc * sizeof(struct hal_rx_reo_queue_ext)) + + (num_1k_desc * sizeof(struct hal_rx_reo_queue_1k)); } void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc, @@ -713,8 +718,6 @@ void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc, { struct hal_rx_reo_queue_ext *ext_desc; - memset(qdesc, 0, sizeof(*qdesc)); - ath12k_hal_reo_set_desc_hdr(&qdesc->desc_hdr, HAL_DESC_REO_OWNED, HAL_DESC_REO_QUEUE_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0); @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef ATH12K_HAL_RX_H @@ -19,7 +19,7 @@ struct hal_rx_wbm_rel_info { bool hw_cc_done; }; -#define HAL_INVALID_PEERID 0xffff +#define HAL_INVALID_PEERID 0x3fff #define VHT_SIG_SU_NSS_MASK 0x7 #define HAL_RX_MAX_MCS 12 @@ -61,6 +61,7 @@ enum hal_rx_bw { HAL_RX_BW_40MHZ, HAL_RX_BW_80MHZ, HAL_RX_BW_160MHZ, + HAL_RX_BW_320MHZ, HAL_RX_BW_MAX, }; @@ -155,6 +156,7 @@ struct hal_rx_mon_ppdu_info { u32 preamble_type; u32 mpdu_len; u16 chan_num; + u16 freq; u16 tcp_msdu_count; u16 tcp_ack_msdu_count; u16 udp_msdu_count; @@ -231,21 +233,27 @@ struct hal_rx_mon_ppdu_info { u8 medium_prot_type; }; -#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0) +#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0) +#define HAL_RX_PPDU_START_INFO1_CHAN_NUM GENMASK(15, 0) +#define HAL_RX_PPDU_START_INFO1_CHAN_FREQ GENMASK(31, 16) struct hal_rx_ppdu_start { __le32 info0; - __le32 chan_num; - __le32 ppdu_start_ts; + __le32 info1; + __le32 ppdu_start_ts_31_0; + __le32 ppdu_start_ts_63_32; + __le32 rsvd[2]; } __packed; -#define HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR GENMASK(25, 16) +#define HAL_RX_PPDU_END_USER_STATS_INFO0_PEER_ID GENMASK(13, 0) +#define HAL_RX_PPDU_END_USER_STATS_INFO0_DEVICE_ID GENMASK(15, 14) +#define HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR GENMASK(26, 16) -#define HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK GENMASK(8, 0) -#define HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID BIT(9) -#define HAL_RX_PPDU_END_USER_STATS_INFO1_QOS_CTRL_VALID BIT(10) -#define HAL_RX_PPDU_END_USER_STATS_INFO1_HT_CTRL_VALID BIT(11) -#define HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE GENMASK(23, 20) +#define HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK GENMASK(10, 0) +#define HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID BIT(11) +#define HAL_RX_PPDU_END_USER_STATS_INFO1_QOS_CTRL_VALID BIT(12) +#define HAL_RX_PPDU_END_USER_STATS_INFO1_HT_CTRL_VALID BIT(13) +#define HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE GENMASK(24, 21) #define HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX GENMASK(15, 0) #define HAL_RX_PPDU_END_USER_STATS_INFO2_FRAME_CTRL GENMASK(31, 16) @@ -261,8 +269,8 @@ struct hal_rx_ppdu_start { #define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP GENMASK(15, 0) #define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_EOSP_BITMAP GENMASK(31, 16) -#define HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_OK_BYTE_COUNT GENMASK(24, 0) -#define HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_ERR_BYTE_COUNT GENMASK(24, 0) +#define HAL_RX_PPDU_END_USER_STATS_INFO7_MPDU_OK_BYTE_COUNT GENMASK(24, 0) +#define HAL_RX_PPDU_END_USER_STATS_INFO8_MPDU_ERR_BYTE_COUNT GENMASK(24, 0) struct hal_rx_ppdu_end_user_stats { __le32 rsvd0[2]; @@ -277,9 +285,9 @@ struct hal_rx_ppdu_end_user_stats { __le32 usr_resp_ref; __le32 info6; __le32 rsvd3[4]; - __le32 mpdu_ok_cnt; + __le32 info7; __le32 rsvd4; - __le32 mpdu_err_cnt; + __le32 info8; __le32 rsvd5[2]; __le32 usr_resp_ref_ext; __le32 rsvd6; @@ -293,6 +301,7 @@ struct hal_rx_ppdu_end_user_stats_ext { __le32 info4; __le32 info5; __le32 info6; + __le32 rsvd; } __packed; #define HAL_RX_HT_SIG_INFO_INFO0_MCS GENMASK(6, 0) @@ -389,11 +398,9 @@ struct hal_rx_he_sig_a_su_info { #define HAL_RX_HE_SIG_A_MU_DL_INFO0_DOPPLER_INDICATION BIT(25) #define HAL_RX_HE_SIG_A_MU_DL_INFO1_TXOP_DURATION GENMASK(6, 0) -#define HAL_RX_HE_SIG_A_MU_DL_INFO1_CODING BIT(7) #define HAL_RX_HE_SIG_A_MU_DL_INFO1_NUM_LTF_SYMB GENMASK(10, 8) #define HAL_RX_HE_SIG_A_MU_DL_INFO1_LDPC_EXTRA BIT(11) #define HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC BIT(12) -#define HAL_RX_HE_SIG_A_MU_DL_INFO1_TXBF BIT(10) #define 
HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_FACTOR GENMASK(14, 13) #define HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_PE_DISAM BIT(15) @@ -419,7 +426,7 @@ struct hal_rx_he_sig_b2_mu_info { #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID GENMASK(10, 0) #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS GENMASK(13, 11) -#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF BIT(19) +#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF BIT(14) #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS GENMASK(18, 15) #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM BIT(19) #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING BIT(20) @@ -435,30 +442,35 @@ enum hal_rx_ul_reception_type { HAL_RECEPTION_TYPE_FRAMELESS }; -#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB GENMASK(15, 8) -#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_RSVD1_RECEPTION GENMASK(3, 0) +#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RECEPTION GENMASK(3, 0) +#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW GENMASK(7, 5) +#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB GENMASK(15, 8) struct hal_rx_phyrx_rssi_legacy_info { - __le32 rsvd[35]; __le32 info0; + __le32 rsvd0[39]; + __le32 info1; + __le32 rsvd1; } __packed; -#define HAL_RX_MPDU_START_INFO0_PPDU_ID GENMASK(31, 16) -#define HAL_RX_MPDU_START_INFO1_PEERID GENMASK(31, 16) -#define HAL_RX_MPDU_START_INFO2_MPDU_LEN GENMASK(13, 0) +#define HAL_RX_MPDU_START_INFO0_PPDU_ID GENMASK(31, 16) +#define HAL_RX_MPDU_START_INFO1_PEERID GENMASK(29, 16) +#define HAL_RX_MPDU_START_INFO1_DEVICE_ID GENMASK(31, 30) +#define HAL_RX_MPDU_START_INFO2_MPDU_LEN GENMASK(13, 0) struct hal_rx_mpdu_start { + __le32 rsvd0[9]; __le32 info0; __le32 info1; - __le32 rsvd1[11]; + __le32 rsvd1[2]; __le32 info2; - __le32 rsvd2[9]; + __le32 rsvd2[16]; } __packed; #define HAL_RX_PPDU_END_DURATION GENMASK(23, 0) struct hal_rx_ppdu_end_duration { __le32 rsvd0[9]; __le32 info0; - __le32 rsvd1[4]; + __le32 rsvd1[18]; } __packed; struct hal_rx_rxpcu_classification_overview { @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH12K_HAL_TX_H @@ -57,7 +57,7 @@ struct hal_tx_info { struct hal_tx_status { enum hal_wbm_rel_src_module buf_rel_source; enum hal_wbm_tqm_rel_reason status; - u8 ack_rssi; + s8 ack_rssi; u32 flags; /* %HAL_TX_STATUS_FLAGS_ */ u32 ppdu_id; u8 try_cnt; @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef ATH12K_HIF_H @@ -10,17 +10,17 @@ #include "core.h" struct ath12k_hif_ops { - u32 (*read32)(struct ath12k_base *sc, u32 address); - void (*write32)(struct ath12k_base *sc, u32 address, u32 data); - void (*irq_enable)(struct ath12k_base *sc); - void (*irq_disable)(struct ath12k_base *sc); - int (*start)(struct ath12k_base *sc); - void (*stop)(struct ath12k_base *sc); - int (*power_up)(struct ath12k_base *sc); - void (*power_down)(struct ath12k_base *sc); + u32 (*read32)(struct ath12k_base *ab, u32 address); + void (*write32)(struct ath12k_base *ab, u32 address, u32 data); + void (*irq_enable)(struct ath12k_base *ab); + void (*irq_disable)(struct ath12k_base *ab); + int (*start)(struct ath12k_base *ab); + void (*stop)(struct ath12k_base *ab); + int (*power_up)(struct ath12k_base *ab); + void (*power_down)(struct ath12k_base *ab, bool is_suspend); int (*suspend)(struct ath12k_base *ab); int (*resume)(struct ath12k_base *ab); - int (*map_service_to_pipe)(struct ath12k_base *sc, u16 service_id, + int (*map_service_to_pipe)(struct ath12k_base *ab, u16 service_id, u8 *ul_pipe, u8 *dl_pipe); int (*get_user_msi_vector)(struct ath12k_base *ab, char *user_name, int *num_vectors, u32 *user_base_data, @@ -30,6 +30,8 @@ struct ath12k_hif_ops { void (*ce_irq_enable)(struct ath12k_base *ab); void (*ce_irq_disable)(struct ath12k_base *ab); void (*get_ce_msi_idx)(struct ath12k_base *ab, u32 ce_id, u32 *msi_idx); + int (*panic_handler)(struct ath12k_base *ab); + void (*coredump_download)(struct ath12k_base *ab); }; static inline int ath12k_hif_map_service_to_pipe(struct ath12k_base *ab, u16 service_id, @@ -133,12 +135,31 @@ static inline void ath12k_hif_write32(struct ath12k_base *ab, u32 address, static inline int ath12k_hif_power_up(struct ath12k_base *ab) { + if (!ab->hif.ops->power_up) + return -EOPNOTSUPP; + return ab->hif.ops->power_up(ab); } -static inline void ath12k_hif_power_down(struct ath12k_base *ab) +static inline void ath12k_hif_power_down(struct ath12k_base *ab, bool is_suspend) +{ + if (!ab->hif.ops->power_down) + return; + + ab->hif.ops->power_down(ab, is_suspend); +} + +static inline int ath12k_hif_panic_handler(struct ath12k_base *ab) { - ab->hif.ops->power_down(ab); + if (!ab->hif.ops->panic_handler) + return NOTIFY_DONE; + + return ab->hif.ops->panic_handler(ab); } +static inline void ath12k_hif_coredump_download(struct ath12k_base *ab) +{ + if (ab->hif.ops->coredump_download) + ab->hif.ops->coredump_download(ab); +} #endif /* ATH12K_HIF_H */ @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
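The hif wrappers above now NULL-check the ops table, so a bus backend may leave optional hooks unset. A reduced, self-contained illustration of that optional-callback pattern; every name in it is hypothetical and only the shape matches the driver.

#include <stdio.h>
#include <stddef.h>

struct demo_ops {
	int (*power_up)(void *ctx);                     /* may be NULL */
	void (*power_down)(void *ctx, int is_suspend);  /* may be NULL */
};

static int demo_power_up(const struct demo_ops *ops, void *ctx)
{
	if (!ops->power_up)
		return -95; /* stand-in for returning -EOPNOTSUPP */
	return ops->power_up(ctx);
}

static void demo_power_down(const struct demo_ops *ops, void *ctx, int susp)
{
	if (ops->power_down) /* silently skip when the hook is absent */
		ops->power_down(ctx, susp);
}

int main(void)
{
	struct demo_ops ops = { .power_up = NULL, .power_down = NULL };

	/* A backend that lacks the hook degrades gracefully. */
	printf("power_up -> %d\n", demo_power_up(&ops, NULL));
	demo_power_down(&ops, NULL, 0);
	return 0;
}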
*/ #include <linux/skbuff.h> #include <linux/ctype.h> @@ -244,6 +244,11 @@ static void ath12k_htc_suspend_complete(struct ath12k_base *ab, bool ack) complete(&ab->htc_suspend); } +static void ath12k_htc_wakeup_from_suspend(struct ath12k_base *ab) +{ + ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot wakeup from suspend is received\n"); +} + void ath12k_htc_rx_completion_handler(struct ath12k_base *ab, struct sk_buff *skb) { @@ -349,6 +354,7 @@ void ath12k_htc_rx_completion_handler(struct ath12k_base *ab, ath12k_htc_suspend_complete(ab, false); break; case ATH12K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID: + ath12k_htc_wakeup_from_suspend(ab); break; default: ath12k_warn(ab, "ignoring unsolicited htc ep0 event %u\n", @@ -358,7 +364,7 @@ void ath12k_htc_rx_completion_handler(struct ath12k_base *ab, goto out; } - ath12k_dbg(ab, ATH12K_DBG_HTC, "htc rx completion ep %d skb %pK\n", + ath12k_dbg(ab, ATH12K_DBG_HTC, "htc rx completion ep %d skb %p\n", eid, skb); ep->ep_ops.ep_rx_complete(ab, skb); @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <linux/types.h> @@ -15,6 +15,10 @@ #include "mhi.h" #include "dp_rx.h" +static const guid_t wcn7850_uuid = GUID_INIT(0xf634f534, 0x6147, 0x11ec, + 0x90, 0xd6, 0x02, 0x42, + 0xac, 0x12, 0x00, 0x03); + static u8 ath12k_hw_qcn9274_mac_from_pdev_id(int pdev_idx) { return pdev_idx; @@ -540,9 +544,6 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = { }, .rx_mon_dest = { 0, 0, 0, - ATH12K_RX_MON_RING_MASK_0, - ATH12K_RX_MON_RING_MASK_1, - ATH12K_RX_MON_RING_MASK_2, }, .rx = { 0, 0, 0, 0, @@ -568,16 +569,15 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = { ATH12K_HOST2RXDMA_RING_MASK_0, }, .tx_mon_dest = { - ATH12K_TX_MON_RING_MASK_0, - ATH12K_TX_MON_RING_MASK_1, + 0, 0, 0, }, }; static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850 = { .tx = { ATH12K_TX_RING_MASK_0, + ATH12K_TX_RING_MASK_1, ATH12K_TX_RING_MASK_2, - ATH12K_TX_RING_MASK_4, }, .rx_mon_dest = { }, @@ -880,13 +880,15 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .hal_params = &ath12k_hw_hal_params_qcn9274, .rxdma1_enable = false, - .num_rxmda_per_pdev = 1, + .num_rxdma_per_pdev = 1, .num_rxdma_dst_ring = 0, .rx_mac_buf_ring = false, .vdev_start_delay = false, .interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP), + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT) | + BIT(NL80211_IFTYPE_AP_VLAN), .supports_monitor = false, .idle_ps = false, @@ -896,7 +898,6 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .reoq_lut_support = false, .supports_shadow_regs = false, - .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274), .num_tcl_banks = 48, .max_tx_ring = 4, @@ -907,6 +908,26 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .hal_ops = &hal_qcn9274_ops, .qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01), + + .rfkill_pin = 0, + .rfkill_cfg = 0, + .rfkill_on_level = 0, + + .rddm_size = 0x600000, + + .def_num_link = 0, + .max_mlo_peer = 256, + + .otp_board_id_register = QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB, + + .supports_sta_ps = false, + + .acpi_guid = NULL, + .supports_dynamic_smps_6ghz = true, + + .iova_mask = 0, + + .supports_aspm = false, }, { .name = "wcn7850 hw2.0", @@ -937,22 +958,25 @@ static const struct ath12k_hw_params 
ath12k_hw_params[] = { .hal_params = &ath12k_hw_hal_params_wcn7850, .rxdma1_enable = false, - .num_rxmda_per_pdev = 2, + .num_rxdma_per_pdev = 2, .num_rxdma_dst_ring = 1, .rx_mac_buf_ring = true, .vdev_start_delay = true, - .interface_modes = BIT(NL80211_IFTYPE_STATION), + .interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_DEVICE) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO), .supports_monitor = false, .idle_ps = true, .download_calib = false, - .supports_suspend = false, + .supports_suspend = true, .tcl_ring_retry = false, .reoq_lut_support = false, .supports_shadow_regs = true, - .hal_desc_sz = sizeof(struct hal_rx_desc_wcn7850), .num_tcl_banks = 7, .max_tx_ring = 3, @@ -964,6 +988,26 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01) | BIT(CNSS_PCIE_PERST_NO_PULL_V01), + + .rfkill_pin = 48, + .rfkill_cfg = 0, + .rfkill_on_level = 1, + + .rddm_size = 0x780000, + + .def_num_link = 2, + .max_mlo_peer = 32, + + .otp_board_id_register = 0, + + .supports_sta_ps = true, + + .acpi_guid = &wcn7850_uuid, + .supports_dynamic_smps_6ghz = false, + + .iova_mask = ATH12K_PCIE_MAX_PAYLOAD_SIZE - 1, + + .supports_aspm = true, }, { .name = "qcn9274 hw2.0", @@ -973,7 +1017,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, - .max_radios = 1, + .max_radios = 2, .single_pdev_only = false, .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274, .internal_sleep_clock = false, @@ -992,13 +1036,15 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .hal_params = &ath12k_hw_hal_params_qcn9274, .rxdma1_enable = false, - .num_rxmda_per_pdev = 1, + .num_rxdma_per_pdev = 1, .num_rxdma_dst_ring = 0, .rx_mac_buf_ring = false, .vdev_start_delay = false, .interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP), + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT) | + BIT(NL80211_IFTYPE_AP_VLAN), .supports_monitor = false, .idle_ps = false, @@ -1008,7 +1054,6 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .reoq_lut_support = false, .supports_shadow_regs = false, - .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274), .num_tcl_banks = 48, .max_tx_ring = 4, @@ -1019,6 +1064,26 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .hal_ops = &hal_qcn9274_ops, .qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01), + + .rfkill_pin = 0, + .rfkill_cfg = 0, + .rfkill_on_level = 0, + + .rddm_size = 0x600000, + + .def_num_link = 0, + .max_mlo_peer = 256, + + .otp_board_id_register = QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB, + + .supports_sta_ps = false, + + .acpi_guid = NULL, + .supports_dynamic_smps_6ghz = true, + + .iova_mask = 0, + + .supports_aspm = false, }, }; @@ -1,13 +1,14 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef ATH12K_HW_H #define ATH12K_HW_H #include <linux/mhi.h> +#include <linux/uuid.h> #include "wmi.h" #include "hal.h" @@ -17,19 +18,30 @@ /* Num VDEVS per radio */ #define TARGET_NUM_VDEVS (16 + 1) -#define TARGET_NUM_PEERS_PDEV (512 + TARGET_NUM_VDEVS) +#define TARGET_NUM_PEERS_PDEV_SINGLE (TARGET_NUM_STATIONS_SINGLE + \ + TARGET_NUM_VDEVS) +#define TARGET_NUM_PEERS_PDEV_DBS (TARGET_NUM_STATIONS_DBS + \ + TARGET_NUM_VDEVS) +#define TARGET_NUM_PEERS_PDEV_DBS_SBS (TARGET_NUM_STATIONS_DBS_SBS + \ + TARGET_NUM_VDEVS) /* Num of peers for Single Radio mode */ -#define TARGET_NUM_PEERS_SINGLE (TARGET_NUM_PEERS_PDEV) +#define TARGET_NUM_PEERS_SINGLE (TARGET_NUM_PEERS_PDEV_SINGLE) /* Num of peers for DBS */ -#define TARGET_NUM_PEERS_DBS (2 * TARGET_NUM_PEERS_PDEV) +#define TARGET_NUM_PEERS_DBS (2 * TARGET_NUM_PEERS_PDEV_DBS) /* Num of peers for DBS_SBS */ -#define TARGET_NUM_PEERS_DBS_SBS (3 * TARGET_NUM_PEERS_PDEV) +#define TARGET_NUM_PEERS_DBS_SBS (3 * TARGET_NUM_PEERS_PDEV_DBS_SBS) -/* Max num of stations (per radio) */ -#define TARGET_NUM_STATIONS 512 +/* Max num of stations for Single Radio mode */ +#define TARGET_NUM_STATIONS_SINGLE 512 + +/* Max num of stations for DBS */ +#define TARGET_NUM_STATIONS_DBS 128 + +/* Max num of stations for DBS_SBS */ +#define TARGET_NUM_STATIONS_DBS_SBS 128 #define TARGET_NUM_PEERS(x) TARGET_NUM_PEERS_##x #define TARGET_NUM_PEER_KEYS 2 @@ -66,7 +78,9 @@ #define TARGET_NUM_WDS_ENTRIES 32 #define TARGET_DMA_BURST_SIZE 1 #define TARGET_RX_BATCHMODE 1 +#define TARGET_EMA_MAX_PROFILE_PERIOD 8 +#define ATH12K_HW_DEFAULT_QUEUE 0 #define ATH12K_HW_MAX_QUEUES 4 #define ATH12K_QUEUE_LEN 4096 @@ -82,6 +96,8 @@ #define ATH12K_M3_FILE "m3.bin" #define ATH12K_REGDB_FILE_NAME "regdb.bin" +#define ATH12K_PCIE_MAX_PAYLOAD_SIZE 128 + enum ath12k_hw_rate_cck { ATH12K_HW_RATE_CCK_LP_11M = 0, ATH12K_HW_RATE_CCK_LP_5_5M, @@ -159,7 +175,7 @@ struct ath12k_hw_params { const struct ath12k_hw_hal_params *hal_params; bool rxdma1_enable:1; - int num_rxmda_per_pdev; + int num_rxdma_per_pdev; int num_rxdma_dst_ring; bool rx_mac_buf_ring:1; bool vdev_start_delay:1; @@ -173,8 +189,8 @@ struct ath12k_hw_params { bool tcl_ring_retry:1; bool reoq_lut_support:1; bool supports_shadow_regs:1; + bool supports_aspm:1; - u32 hal_desc_sz; u32 num_tcl_banks; u32 max_tx_ring; @@ -186,6 +202,24 @@ struct ath12k_hw_params { const struct hal_ops *hal_ops; u64 qmi_cnss_feature_bitmap; + + u32 rfkill_pin; + u32 rfkill_cfg; + u32 rfkill_on_level; + + u32 rddm_size; + + u8 def_num_link; + u16 max_mlo_peer; + + u32 otp_board_id_register; + + bool supports_sta_ps; + + const guid_t *acpi_guid; + bool supports_dynamic_smps_6ghz; + + u32 iova_mask; }; struct ath12k_hw_ops { @@ -236,10 +270,16 @@ enum ath12k_bd_ie_board_type { ATH12K_BD_IE_BOARD_DATA = 1, }; +enum ath12k_bd_ie_regdb_type { + ATH12K_BD_IE_REGDB_NAME = 0, + ATH12K_BD_IE_REGDB_DATA = 1, +}; + enum ath12k_bd_ie_type { /* contains sub IEs of enum ath12k_bd_ie_board_type */ ATH12K_BD_IE_BOARD = 0, - ATH12K_BD_IE_BOARD_EXT = 1, + /* contains sub IEs of enum ath12k_bd_ie_regdb_type */ + ATH12K_BD_IE_REGDB = 1, }; struct ath12k_hw_regs { @@ -309,6 +349,18 @@ struct ath12k_hw_regs { u32 hal_reo_status_ring_base; }; +static inline const char *ath12k_bd_ie_type_str(enum ath12k_bd_ie_type type) +{ + switch (type) { + case ATH12K_BD_IE_BOARD: + return "board data"; + case ATH12K_BD_IE_REGDB: + return "regdb data"; + } + + return "unknown"; +} + int ath12k_hw_init(struct ath12k_base *ab); #endif @@ -1,11 +1,13 @@ // SPDX-License-Identifier: 
BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <net/mac80211.h> +#include <net/cfg80211.h> #include <linux/etherdevice.h> + #include "mac.h" #include "core.h" #include "debug.h" @@ -14,6 +16,9 @@ #include "dp_tx.h" #include "dp_rx.h" #include "peer.h" +#include "debugfs.h" +#include "hif.h" +#include "wow.h" #define CHAN2G(_channel, _freq, _flags) { \ .band = NL80211_BAND_2GHZ, \ @@ -90,6 +95,10 @@ static const struct ieee80211_channel ath12k_5ghz_channels[] = { }; static const struct ieee80211_channel ath12k_6ghz_channels[] = { + /* Operating Class 136 */ + CHAN6G(2, 5935, 0), + + /* Operating Classes 131-135 */ CHAN6G(1, 5955, 0), CHAN6G(5, 5975, 0), CHAN6G(9, 5995, 0), @@ -241,8 +250,11 @@ static const u32 ath12k_smps_map[] = { [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE, }; -static int ath12k_start_vdev_delay(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); +static int ath12k_start_vdev_delay(struct ath12k *ar, + struct ath12k_link_vif *arvif); +static void ath12k_mac_stop(struct ath12k *ar); +static int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif); +static int ath12k_mac_vdev_delete(struct ath12k *ar, struct ath12k_link_vif *arvif); static const char *ath12k_mac_phymode_str(enum wmi_phy_mode mode) { @@ -343,6 +355,9 @@ ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw) case ATH12K_BW_160: ret = RATE_INFO_BW_160; break; + case ATH12K_BW_320: + ret = RATE_INFO_BW_320; + break; } return ret; @@ -359,6 +374,8 @@ enum ath12k_supported_bw ath12k_mac_mac80211_bw_to_ath12k_bw(enum rate_info_bw b return ATH12K_BW_80; case RATE_INFO_BW_160: return ATH12K_BW_160; + case RATE_INFO_BW_320: + return ATH12K_BW_320; default: return ATH12K_BW_20; } @@ -460,24 +477,66 @@ static u8 ath12k_parse_mpdudensity(u8 mpdudensity) } } -static int ath12k_mac_vif_chan(struct ieee80211_vif *vif, - struct cfg80211_chan_def *def) +static int ath12k_mac_vif_link_chan(struct ieee80211_vif *vif, u8 link_id, + struct cfg80211_chan_def *def) { + struct ieee80211_bss_conf *link_conf; struct ieee80211_chanctx_conf *conf; rcu_read_lock(); - conf = rcu_dereference(vif->bss_conf.chanctx_conf); + link_conf = rcu_dereference(vif->link_conf[link_id]); + + if (!link_conf) { + rcu_read_unlock(); + return -ENOLINK; + } + + conf = rcu_dereference(link_conf->chanctx_conf); if (!conf) { rcu_read_unlock(); return -ENOENT; } - *def = conf->def; rcu_read_unlock(); return 0; } +static struct ieee80211_bss_conf * +ath12k_mac_get_link_bss_conf(struct ath12k_link_vif *arvif) +{ + struct ieee80211_vif *vif = arvif->ahvif->vif; + struct ieee80211_bss_conf *link_conf; + struct ath12k *ar = arvif->ar; + + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) + return NULL; + + link_conf = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy, + vif->link_conf[arvif->link_id]); + + return link_conf; +} + +static struct ieee80211_link_sta *ath12k_mac_get_link_sta(struct ath12k_link_sta *arsta) +{ + struct ath12k_sta *ahsta = arsta->ahsta; + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta); + struct ieee80211_link_sta *link_sta; + + lockdep_assert_wiphy(ahsta->ahvif->ah->hw->wiphy); + + if (arsta->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) + return NULL; + + link_sta = wiphy_dereference(ahsta->ahvif->ah->hw->wiphy, + 
sta->link[arsta->link_id]); + + return link_sta; +} + static bool ath12k_mac_bitrate_is_cck(int bitrate) { switch (bitrate) { @@ -523,21 +582,38 @@ static void ath12k_get_arvif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct ath12k_vif_iter *arvif_iter = data; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + unsigned long links_map = ahvif->links_map; + struct ath12k_link_vif *arvif; + u8 link_id; + + for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) { + arvif = rcu_dereference(ahvif->link[link_id]); - if (arvif->vdev_id == arvif_iter->vdev_id) - arvif_iter->arvif = arvif; + if (WARN_ON(!arvif)) + continue; + + if (arvif->vdev_id == arvif_iter->vdev_id && + arvif->ar == arvif_iter->ar) { + arvif_iter->arvif = arvif; + break; + } + } } -struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id) +struct ath12k_link_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id) { struct ath12k_vif_iter arvif_iter = {}; u32 flags; + /* To use the arvif returned, caller must have held rcu read lock. + */ + WARN_ON(!rcu_read_lock_any_held()); arvif_iter.vdev_id = vdev_id; + arvif_iter.ar = ar; flags = IEEE80211_IFACE_ITER_RESUME_ALL; - ieee80211_iterate_active_interfaces_atomic(ar->hw, + ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar), flags, ath12k_get_arvif_iter, &arvif_iter); @@ -549,16 +625,17 @@ struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id) return arvif_iter.arvif; } -struct ath12k_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab, - u32 vdev_id) +struct ath12k_link_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab, + u32 vdev_id) { int i; struct ath12k_pdev *pdev; - struct ath12k_vif *arvif; + struct ath12k_link_vif *arvif; for (i = 0; i < ab->num_radios; i++) { pdev = rcu_dereference(ab->pdevs_active[i]); - if (pdev && pdev->ar) { + if (pdev && pdev->ar && + (pdev->ar->allocated_vdev_map & (1LL << vdev_id))) { arvif = ath12k_mac_get_arvif(pdev->ar, vdev_id); if (arvif) return arvif; @@ -607,6 +684,161 @@ struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id) return NULL; } +static bool ath12k_mac_is_ml_arvif(struct ath12k_link_vif *arvif) +{ + struct ath12k_vif *ahvif = arvif->ahvif; + + lockdep_assert_wiphy(ahvif->ah->hw->wiphy); + + if (ahvif->vif->valid_links & BIT(arvif->link_id)) + return true; + + return false; +} + +static struct ath12k *ath12k_mac_get_ar_by_chan(struct ieee80211_hw *hw, + struct ieee80211_channel *channel) +{ + struct ath12k_hw *ah = hw->priv; + struct ath12k *ar; + int i; + + ar = ah->radio; + + if (ah->num_radio == 1) + return ar; + + for_each_ar(ah, ar, i) { + if (channel->center_freq >= KHZ_TO_MHZ(ar->freq_range.start_freq) && + channel->center_freq <= KHZ_TO_MHZ(ar->freq_range.end_freq)) + return ar; + } + return NULL; +} + +static struct ath12k *ath12k_get_ar_by_ctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx) +{ + if (!ctx) + return NULL; + + return ath12k_mac_get_ar_by_chan(hw, ctx->def.chan); +} + +static struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u8 link_id) +{ + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k_link_vif *arvif; + + lockdep_assert_wiphy(hw->wiphy); + + /* If there is one pdev within ah, then we return + * ar directly. 
+ */ + if (ah->num_radio == 1) + return ah->radio; + + if (!(ahvif->links_map & BIT(link_id))) + return NULL; + + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); + if (arvif && arvif->is_created) + return arvif->ar; + + return NULL; +} + +void ath12k_mac_get_any_chanctx_conf_iter(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *conf, + void *data) +{ + struct ath12k_mac_get_any_chanctx_conf_arg *arg = data; + struct ath12k *ctx_ar = ath12k_get_ar_by_ctx(hw, conf); + + if (ctx_ar == arg->ar) + arg->chanctx_conf = conf; +} + +static struct ath12k_link_vif *ath12k_mac_get_vif_up(struct ath12k *ar) +{ + struct ath12k_link_vif *arvif; + + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->is_up) + return arvif; + } + + return NULL; +} + +static bool ath12k_mac_band_match(enum nl80211_band band1, enum WMI_HOST_WLAN_BAND band2) +{ + switch (band1) { + case NL80211_BAND_2GHZ: + if (band2 & WMI_HOST_WLAN_2G_CAP) + return true; + break; + case NL80211_BAND_5GHZ: + case NL80211_BAND_6GHZ: + if (band2 & WMI_HOST_WLAN_5G_CAP) + return true; + break; + default: + return false; + } + + return false; +} + +static u8 ath12k_mac_get_target_pdev_id_from_vif(struct ath12k_link_vif *arvif) +{ + struct ath12k *ar = arvif->ar; + struct ath12k_base *ab = ar->ab; + struct ieee80211_vif *vif = arvif->ahvif->vif; + struct cfg80211_chan_def def; + enum nl80211_band band; + u8 pdev_id = ab->fw_pdev[0].pdev_id; + int i; + + if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))) + return pdev_id; + + band = def.chan->band; + + for (i = 0; i < ab->fw_pdev_count; i++) { + if (ath12k_mac_band_match(band, ab->fw_pdev[i].supported_bands)) + return ab->fw_pdev[i].pdev_id; + } + + return pdev_id; +} + +u8 ath12k_mac_get_target_pdev_id(struct ath12k *ar) +{ + struct ath12k_link_vif *arvif; + struct ath12k_base *ab = ar->ab; + + if (!ab->hw_params->single_pdev_only) + return ar->pdev->pdev_id; + + arvif = ath12k_mac_get_vif_up(ar); + + /* fw_pdev array has pdev ids derived from phy capability + * service ready event (pdev_and_hw_link_ids). + * If no vif is active, return default first index. 
+ */ + if (!arvif) + return ar->ab->fw_pdev[0].pdev_id; + + /* If active vif is found, return the pdev id matching chandef band */ + return ath12k_mac_get_target_pdev_id_from_vif(arvif); +} + static void ath12k_pdev_caps_update(struct ath12k *ar) { struct ath12k_base *ab = ar->ab; @@ -628,11 +860,11 @@ static void ath12k_pdev_caps_update(struct ath12k *ar) static int ath12k_mac_txpower_recalc(struct ath12k *ar) { struct ath12k_pdev *pdev = ar->pdev; - struct ath12k_vif *arvif; + struct ath12k_link_vif *arvif; int ret, txpower = -1; u32 param; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); list_for_each_entry(arvif, &ar->arvifs, list) { if (arvif->txpower <= 0) @@ -682,13 +914,13 @@ fail: return ret; } -static int ath12k_recalc_rtscts_prot(struct ath12k_vif *arvif) +static int ath12k_recalc_rtscts_prot(struct ath12k_link_vif *arvif) { struct ath12k *ar = arvif->ar; u32 vdev_param, rts_cts; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); vdev_param = WMI_VDEV_PARAM_ENABLE_RTSCTS; @@ -721,7 +953,7 @@ static int ath12k_recalc_rtscts_prot(struct ath12k_vif *arvif) return ret; } -static int ath12k_mac_set_kickout(struct ath12k_vif *arvif) +static int ath12k_mac_set_kickout(struct ath12k_link_vif *arvif) { struct ath12k *ar = arvif->ar; u32 param; @@ -771,11 +1003,14 @@ void ath12k_mac_peer_cleanup_all(struct ath12k *ar) struct ath12k_peer *peer, *tmp; struct ath12k_base *ab = ar->ab; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); spin_lock_bh(&ab->base_lock); list_for_each_entry_safe(peer, tmp, &ab->peers, list) { - ath12k_dp_rx_peer_tid_cleanup(ar, peer); + /* Skip Rx TID cleanup for self peer */ + if (peer->sta) + ath12k_dp_rx_peer_tid_cleanup(ar, peer); + list_del(&peer->list); kfree(peer); } @@ -787,7 +1022,7 @@ void ath12k_mac_peer_cleanup_all(struct ath12k *ar) static int ath12k_mac_vdev_setup_sync(struct ath12k *ar) { - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) return -ESHUTDOWN; @@ -804,9 +1039,12 @@ static int ath12k_mac_vdev_setup_sync(struct ath12k *ar) static int ath12k_monitor_vdev_up(struct ath12k *ar, int vdev_id) { + struct ath12k_wmi_vdev_up_params params = {}; int ret; - ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr); + params.vdev_id = vdev_id; + params.bssid = ar->mac_addr; + ret = ath12k_wmi_vdev_up(ar, ¶ms); if (ret) { ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n", vdev_id, ret); @@ -823,9 +1061,10 @@ static int ath12k_mac_monitor_vdev_start(struct ath12k *ar, int vdev_id, { struct ieee80211_channel *channel; struct wmi_vdev_start_req_arg arg = {}; + struct ath12k_wmi_vdev_up_params params = {}; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); channel = chandef->chan; arg.vdev_id = vdev_id; @@ -863,7 +1102,9 @@ static int ath12k_mac_monitor_vdev_start(struct ath12k *ar, int vdev_id, return ret; } - ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr); + params.vdev_id = vdev_id; + params.bssid = ar->mac_addr; + ret = ath12k_wmi_vdev_up(ar, ¶ms); if (ret) { ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n", vdev_id, ret); @@ -886,7 +1127,7 @@ static int ath12k_mac_monitor_vdev_stop(struct ath12k *ar) { int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); reinit_completion(&ar->vdev_setup_done); 
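The monitor vdev start/stop paths above, like the other vdev lifecycle paths in this patch, share one firmware handshake: ar->vdev_setup_done is re-armed before the WMI request goes out, and ath12k_mac_vdev_setup_sync() then blocks until the matching WMI response event completes it (or the wait times out). A condensed sketch of the pattern, using only helpers already present in this patch; the wrapper name is made up for illustration and error logging is trimmed:

	/* Illustrative only, not part of this patch. */
	static int example_monitor_vdev_stop(struct ath12k *ar, int vdev_id)
	{
		int ret;

		reinit_completion(&ar->vdev_setup_done);	/* arm before the command */

		ret = ath12k_wmi_vdev_stop(ar, vdev_id);	/* asynchronous WMI request */
		if (ret)
			return ret;

		/* The firmware response event completes ar->vdev_setup_done;
		 * ath12k_mac_vdev_setup_sync() waits on it with a timeout and
		 * bails out early when ATH12K_FLAG_CRASH_FLUSH is set.
		 */
		return ath12k_mac_vdev_setup_sync(ar);
	}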
@@ -916,9 +1157,8 @@ static int ath12k_mac_monitor_vdev_create(struct ath12k *ar) struct ath12k_wmi_vdev_create_arg arg = {}; int bit, ret; u8 tmp_addr[6]; - u16 nss; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); if (ar->monitor_vdev_created) return 0; @@ -956,19 +1196,6 @@ static int ath12k_mac_monitor_vdev_create(struct ath12k *ar) return ret; } - nss = hweight32(ar->cfg_tx_chainmask) ? : 1; - ret = ath12k_wmi_vdev_set_param_cmd(ar, ar->monitor_vdev_id, - WMI_VDEV_PARAM_NSS, nss); - if (ret) { - ath12k_warn(ar->ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n", - ar->monitor_vdev_id, ar->cfg_tx_chainmask, nss, ret); - return ret; - } - - ret = ath12k_mac_txpower_recalc(ar); - if (ret) - return ret; - ar->allocated_vdev_map |= 1LL << ar->monitor_vdev_id; ar->ab->free_vdev_map &= ~(1LL << ar->monitor_vdev_id); ar->num_created_vdevs++; @@ -984,7 +1211,7 @@ static int ath12k_mac_monitor_vdev_delete(struct ath12k *ar) int ret; unsigned long time_left; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); if (!ar->monitor_vdev_created) return 0; @@ -1030,12 +1257,12 @@ static int ath12k_mac_monitor_start(struct ath12k *ar) struct cfg80211_chan_def *chandef = NULL; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); if (ar->monitor_started) return 0; - ieee80211_iter_chan_contexts_atomic(ar->hw, + ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar), ath12k_mac_get_any_chandef_iter, &chandef); if (!chandef) @@ -1060,7 +1287,7 @@ static int ath12k_mac_monitor_stop(struct ath12k *ar) { int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); if (!ar->monitor_started) return 0; @@ -1078,95 +1305,407 @@ static int ath12k_mac_monitor_stop(struct ath12k *ar) return ret; } -static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed) +int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif) { - struct ath12k *ar = hw->priv; + struct ath12k_vif *ahvif = arvif->ahvif; + struct ath12k *ar = arvif->ar; + int ret; + + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + reinit_completion(&ar->vdev_setup_done); + + ret = ath12k_wmi_vdev_stop(ar, arvif->vdev_id); + if (ret) { + ath12k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n", + arvif->vdev_id, ret); + goto err; + } + + ret = ath12k_mac_vdev_setup_sync(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n", + arvif->vdev_id, ret); + goto err; + } + + WARN_ON(ar->num_started_vdevs == 0); + + ar->num_started_vdevs--; + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n", + ahvif->vif->addr, arvif->vdev_id); + + if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) { + clear_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags); + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n", + arvif->vdev_id); + } + + return 0; +err: + return ret; +} + +static int ath12k_mac_config(struct ath12k *ar, u32 changed) +{ + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); struct ieee80211_conf *conf = &hw->conf; int ret = 0; - mutex_lock(&ar->conf_mutex); + lockdep_assert_wiphy(hw->wiphy); if (changed & IEEE80211_CONF_CHANGE_MONITOR) { ar->monitor_conf_enabled = conf->flags & IEEE80211_CONF_MONITOR; if (ar->monitor_conf_enabled) { if (ar->monitor_vdev_created) - goto exit; + return ret; ret = ath12k_mac_monitor_vdev_create(ar); if (ret) - goto exit; + return ret; ret = ath12k_mac_monitor_start(ar); if (ret) goto err_mon_del; } 
else { if (!ar->monitor_vdev_created) - goto exit; + return ret; ret = ath12k_mac_monitor_stop(ar); if (ret) - goto exit; + return ret; ath12k_mac_monitor_vdev_delete(ar); } } -exit: - mutex_unlock(&ar->conf_mutex); return ret; err_mon_del: ath12k_mac_monitor_vdev_delete(ar); - mutex_unlock(&ar->conf_mutex); return ret; } -static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif) +static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed) +{ + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k *ar; + int ret; + + lockdep_assert_wiphy(hw->wiphy); + + ar = ath12k_ah_to_ar(ah, 0); + + ret = ath12k_mac_config(ar, changed); + if (ret) + ath12k_warn(ar->ab, "failed to update config pdev idx %d: %d\n", + ar->pdev_idx, ret); + + return ret; +} + +static int ath12k_mac_setup_bcn_p2p_ie(struct ath12k_link_vif *arvif, + struct sk_buff *bcn) +{ + struct ath12k *ar = arvif->ar; + struct ieee80211_mgmt *mgmt; + const u8 *p2p_ie; + int ret; + + mgmt = (void *)bcn->data; + p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, + mgmt->u.beacon.variable, + bcn->len - (mgmt->u.beacon.variable - + bcn->data)); + if (!p2p_ie) { + ath12k_warn(ar->ab, "no P2P ie found in beacon\n"); + return -ENOENT; + } + + ret = ath12k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie); + if (ret) { + ath12k_warn(ar->ab, "failed to submit P2P GO bcn ie for vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + return 0; +} + +static int ath12k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui, + u8 oui_type, size_t ie_offset) +{ + const u8 *next, *end; + size_t len; + u8 *ie; + + if (WARN_ON(skb->len < ie_offset)) + return -EINVAL; + + ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type, + skb->data + ie_offset, + skb->len - ie_offset); + if (!ie) + return -ENOENT; + + len = ie[1] + 2; + end = skb->data + skb->len; + next = ie + len; + + if (WARN_ON(next > end)) + return -EINVAL; + + memmove(ie, next, end - next); + skb_trim(skb, skb->len - len); + + return 0; +} + +static void ath12k_mac_set_arvif_ies(struct ath12k_link_vif *arvif, struct sk_buff *bcn, + u8 bssid_index, bool *nontx_profile_found) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)bcn->data; + const struct element *elem, *nontx, *index, *nie; + const u8 *start, *tail; + u16 rem_len; + u8 i; + + start = bcn->data + ieee80211_get_hdrlen_from_skb(bcn) + sizeof(mgmt->u.beacon); + tail = skb_tail_pointer(bcn); + rem_len = tail - start; + + arvif->rsnie_present = false; + arvif->wpaie_present = false; + + if (cfg80211_find_ie(WLAN_EID_RSN, start, rem_len)) + arvif->rsnie_present = true; + if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPA, + start, rem_len)) + arvif->wpaie_present = true; + + /* Return from here for the transmitted profile */ + if (!bssid_index) + return; + + /* Initial rsnie_present for the nontransmitted profile is set to be same as that + * of the transmitted profile. It will be changed if security configurations are + * different. 
+	 */
+	*nontx_profile_found = false;
+	for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, start, rem_len) {
+		/* Fixed minimum MBSSID element length with at least one
+		 * nontransmitted BSSID profile is 12 bytes as given below:
+		 * 1 (max BSSID indicator) +
+		 * 2 (Nontransmitted BSSID profile: Subelement ID + length) +
+		 * 4 (Nontransmitted BSSID Capabilities: tag + length + info) +
+		 * 2 (Nontransmitted BSSID SSID: tag + length) +
+		 * 3 (Nontransmitted BSSID Index: tag + length + BSSID index)
+		 */
+		if (elem->datalen < 12 || elem->data[0] < 1)
+			continue; /* Max BSSID indicator must be >=1 */
+
+		for_each_element(nontx, elem->data + 1, elem->datalen - 1) {
+			start = nontx->data;
+
+			if (nontx->id != 0 || nontx->datalen < 4)
+				continue; /* Invalid nontransmitted profile */
+
+			if (nontx->data[0] != WLAN_EID_NON_TX_BSSID_CAP ||
+			    nontx->data[1] != 2) {
+				continue; /* Missing nontransmitted BSS capabilities */
+			}
+
+			if (nontx->data[4] != WLAN_EID_SSID)
+				continue; /* Missing SSID for nontransmitted BSS */
+
+			index = cfg80211_find_elem(WLAN_EID_MULTI_BSSID_IDX,
+						   start, nontx->datalen);
+			if (!index || index->datalen < 1 || index->data[0] == 0)
+				continue; /* Invalid MBSSID Index element */
+
+			if (index->data[0] == bssid_index) {
+				*nontx_profile_found = true;
+				if (cfg80211_find_ie(WLAN_EID_RSN,
+						     nontx->data,
+						     nontx->datalen)) {
+					arvif->rsnie_present = true;
+					return;
+				} else if (!arvif->rsnie_present) {
+					return; /* Both tx and nontx BSS are open */
+				}
+
+				nie = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
+							     nontx->data,
+							     nontx->datalen);
+				if (!nie || nie->datalen < 2)
+					return; /* Invalid non-inheritance element */
+
+				for (i = 1; i < nie->datalen - 1; i++) {
+					if (nie->data[i] == WLAN_EID_RSN) {
+						arvif->rsnie_present = false;
+						break;
+					}
+				}
+
+				return;
+			}
+		}
+	}
+}
+
+static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif)
+{
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ieee80211_bss_conf *bss_conf;
+	struct ath12k_wmi_bcn_tmpl_ema_arg ema_args;
+	struct ieee80211_ema_beacons *beacons;
+	struct ath12k_link_vif *tx_arvif;
+	bool nontx_profile_found = false;
+	struct ath12k_vif *tx_ahvif;
+	int ret = 0;
+	u8 i;
+
+	bss_conf = ath12k_mac_get_link_bss_conf(arvif);
+	if (!bss_conf) {
+		ath12k_warn(arvif->ar->ab,
+			    "failed to get link bss conf to update bcn tmpl for vif %pM link %u\n",
+			    ahvif->vif->addr, arvif->link_id);
+		return -ENOLINK;
+	}
+
+	tx_ahvif = ath12k_vif_to_ahvif(ahvif->vif->mbssid_tx_vif);
+	tx_arvif = &tx_ahvif->deflink;
+	beacons = ieee80211_beacon_get_template_ema_list(ath12k_ar_to_hw(tx_arvif->ar),
+							 tx_ahvif->vif,
+							 tx_arvif->link_id);
+	if (!beacons || !beacons->cnt) {
+		ath12k_warn(arvif->ar->ab,
+			    "failed to get ema beacon templates from mac80211\n");
+		return -EPERM;
+	}
+
+	if (tx_arvif == arvif)
+		ath12k_mac_set_arvif_ies(arvif, beacons->bcn[0].skb, 0, NULL);
+
+	for (i = 0; i < beacons->cnt; i++) {
+		if (tx_arvif != arvif && !nontx_profile_found)
+			ath12k_mac_set_arvif_ies(arvif, beacons->bcn[i].skb,
+						 bss_conf->bssid_index,
+						 &nontx_profile_found);
+
+		ema_args.bcn_cnt = beacons->cnt;
+		ema_args.bcn_index = i;
+		ret = ath12k_wmi_bcn_tmpl(tx_arvif->ar, tx_arvif->vdev_id,
+					  &beacons->bcn[i].offs,
+					  beacons->bcn[i].skb, &ema_args);
+		if (ret) {
+			ath12k_warn(tx_arvif->ar->ab,
+				    "failed to set ema beacon template id %i error %d\n",
+				    i, ret);
+			break;
+		}
+	}
+
+	if (tx_arvif != arvif && !nontx_profile_found)
+		ath12k_warn(arvif->ar->ab,
+			    "nontransmitted bssid index %u not found in beacon template\n",
+			    bss_conf->bssid_index);
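+	/* Note on the loop above (illustrative example, not a normative
+	 * description of the WMI interface): mac80211 hands back one beacon
+	 * template per EMA profile period, and each template is pushed to
+	 * firmware tagged with its position so firmware can transmit the set
+	 * in rotation. For a profile periodicity of 3 the loop issues three
+	 * ath12k_wmi_bcn_tmpl() calls:
+	 *
+	 *	bcn_cnt=3 bcn_index=0	(first EMA beacon)
+	 *	bcn_cnt=3 bcn_index=1
+	 *	bcn_cnt=3 bcn_index=2	(last EMA beacon)
+	 *
+	 * A nontransmitted profile lives in exactly one of these templates,
+	 * which is why the IE scan above is repeated per template until the
+	 * profile's BSSID index is found.
+	 */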
+ + ieee80211_beacon_free_ema_list(beacons); + return ret; +} + +static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif) { + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); + struct ieee80211_bss_conf *link_conf; + struct ath12k_link_vif *tx_arvif = arvif; struct ath12k *ar = arvif->ar; struct ath12k_base *ab = ar->ab; - struct ieee80211_hw *hw = ar->hw; - struct ieee80211_vif *vif = arvif->vif; struct ieee80211_mutable_offsets offs = {}; + struct ath12k_vif *tx_ahvif = ahvif; + bool nontx_profile_found = false; struct sk_buff *bcn; - struct ieee80211_mgmt *mgmt; - u8 *ies; int ret; - if (arvif->vdev_type != WMI_VDEV_TYPE_AP) + if (ahvif->vdev_type != WMI_VDEV_TYPE_AP) return 0; - bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0); + link_conf = ath12k_mac_get_link_bss_conf(arvif); + if (!link_conf) { + ath12k_warn(ar->ab, "unable to access bss link conf to set bcn tmpl for vif %pM link %u\n", + vif->addr, arvif->link_id); + return -ENOLINK; + } + + if (vif->mbssid_tx_vif) { + tx_ahvif = ath12k_vif_to_ahvif(vif->mbssid_tx_vif); + tx_arvif = &tx_ahvif->deflink; + if (tx_arvif != arvif && arvif->is_up) + return 0; + + if (link_conf->ema_ap) + return ath12k_mac_setup_bcn_tmpl_ema(arvif); + } + + bcn = ieee80211_beacon_get_template(ath12k_ar_to_hw(tx_arvif->ar), tx_ahvif->vif, + &offs, tx_arvif->link_id); if (!bcn) { ath12k_warn(ab, "failed to get beacon template from mac80211\n"); return -EPERM; } - ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn); - ies += sizeof(mgmt->u.beacon); - - if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies))) - arvif->rsnie_present = true; + if (tx_arvif == arvif) { + ath12k_mac_set_arvif_ies(arvif, bcn, 0, NULL); + } else { + ath12k_mac_set_arvif_ies(arvif, bcn, + link_conf->bssid_index, + &nontx_profile_found); + if (!nontx_profile_found) + ath12k_warn(ab, + "nontransmitted profile not found in beacon template\n"); + } - if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, - WLAN_OUI_TYPE_MICROSOFT_WPA, - ies, (skb_tail_pointer(bcn) - ies))) - arvif->wpaie_present = true; + if (ahvif->vif->type == NL80211_IFTYPE_AP && ahvif->vif->p2p) { + ret = ath12k_mac_setup_bcn_p2p_ie(arvif, bcn); + if (ret) { + ath12k_warn(ab, "failed to setup P2P GO bcn ie: %d\n", + ret); + goto free_bcn_skb; + } - ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn); + /* P2P IE is inserted by firmware automatically (as + * configured above) so remove it from the base beacon + * template to avoid duplicate P2P IEs in beacon frames. 
+ */ + ret = ath12k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, + WLAN_OUI_TYPE_WFA_P2P, + offsetof(struct ieee80211_mgmt, + u.beacon.variable)); + if (ret) { + ath12k_warn(ab, "failed to remove P2P vendor ie: %d\n", + ret); + goto free_bcn_skb; + } + } - kfree_skb(bcn); + ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn, NULL); if (ret) ath12k_warn(ab, "failed to submit beacon template command: %d\n", ret); +free_bcn_skb: + kfree_skb(bcn); return ret; } -static void ath12k_control_beaconing(struct ath12k_vif *arvif, +static void ath12k_control_beaconing(struct ath12k_link_vif *arvif, struct ieee80211_bss_conf *info) { + struct ath12k_wmi_vdev_up_params params = {}; + struct ath12k_vif *ahvif = arvif->ahvif; struct ath12k *ar = arvif->ar; int ret; - lockdep_assert_held(&arvif->ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(arvif->ar)->wiphy); if (!info->enable_beacon) { ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id); @@ -1186,12 +1725,23 @@ static void ath12k_control_beaconing(struct ath12k_vif *arvif, return; } - arvif->aid = 0; + ahvif->aid = 0; - ether_addr_copy(arvif->bssid, info->bssid); + ether_addr_copy(arvif->bssid, info->addr); - ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, - arvif->bssid); + params.vdev_id = arvif->vdev_id; + params.aid = ahvif->aid; + params.bssid = arvif->bssid; + if (ahvif->vif->mbssid_tx_vif) { + struct ath12k_vif *tx_ahvif = + ath12k_vif_to_ahvif(ahvif->vif->mbssid_tx_vif); + struct ath12k_link_vif *tx_arvif = &tx_ahvif->deflink; + + params.tx_bssid = tx_arvif->bssid; + params.nontx_profile_idx = info->bssid_index; + params.nontx_profile_cnt = 1 << info->bssid_indicator; + } + ret = ath12k_wmi_vdev_up(arvif->ar, ¶ms); if (ret) { ath12k_warn(ar->ab, "failed to bring up vdev %d: %i\n", arvif->vdev_id, ret); @@ -1203,49 +1753,140 @@ static void ath12k_control_beaconing(struct ath12k_vif *arvif, ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id); } +static void ath12k_mac_handle_beacon_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct sk_buff *skb = data; + struct ieee80211_mgmt *mgmt = (void *)skb->data; + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif = &ahvif->deflink; + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid)) + return; + + cancel_delayed_work(&arvif->connection_loss_work); +} + +void ath12k_mac_handle_beacon(struct ath12k *ar, struct sk_buff *skb) +{ + ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar), + IEEE80211_IFACE_ITER_NORMAL, + ath12k_mac_handle_beacon_iter, + skb); +} + +static void ath12k_mac_handle_beacon_miss_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + u32 *vdev_id = data; + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif = &ahvif->deflink; + struct ath12k *ar = arvif->ar; + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); + + if (arvif->vdev_id != *vdev_id) + return; + + if (!arvif->is_up) + return; + + ieee80211_beacon_loss(vif); + + /* Firmware doesn't report beacon loss events repeatedly. If AP probe + * (done by mac80211) succeeds but beacons do not resume then it + * doesn't make sense to continue operation. Queue connection loss work + * which can be cancelled when beacon is received. 
+ */ + ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work, + ATH12K_CONNECTION_LOSS_HZ); +} + +void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id) +{ + ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar), + IEEE80211_IFACE_ITER_NORMAL, + ath12k_mac_handle_beacon_miss_iter, + &vdev_id); +} + +static void ath12k_mac_vif_sta_connection_loss_work(struct work_struct *work) +{ + struct ath12k_link_vif *arvif = container_of(work, struct ath12k_link_vif, + connection_loss_work.work); + struct ieee80211_vif *vif = arvif->ahvif->vif; + + if (!arvif->is_up) + return; + + ieee80211_connection_loss(vif); +} + static void ath12k_peer_assoc_h_basic(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, struct ath12k_wmi_peer_assoc_arg *arg) { - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); + struct ieee80211_bss_conf *bss_conf; u32 aid; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(hw->wiphy); if (vif->type == NL80211_IFTYPE_STATION) aid = vif->cfg.aid; else aid = sta->aid; - ether_addr_copy(arg->peer_mac, sta->addr); + ether_addr_copy(arg->peer_mac, arsta->addr); arg->vdev_id = arvif->vdev_id; arg->peer_associd = aid; arg->auth_flag = true; /* TODO: STA WAR in ath10k for listen interval required? */ - arg->peer_listen_intval = ar->hw->conf.listen_interval; + arg->peer_listen_intval = hw->conf.listen_interval; arg->peer_nss = 1; - arg->peer_caps = vif->bss_conf.assoc_capability; + + bss_conf = ath12k_mac_get_link_bss_conf(arvif); + if (!bss_conf) { + ath12k_warn(ar->ab, "unable to access bss link conf in peer assoc for vif %pM link %u\n", + vif->addr, arvif->link_id); + return; + } + + arg->peer_caps = bss_conf->assoc_capability; } static void ath12k_peer_assoc_h_crypto(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, struct ath12k_wmi_peer_assoc_arg *arg) { - struct ieee80211_bss_conf *info = &vif->bss_conf; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); + struct ieee80211_bss_conf *info; struct cfg80211_chan_def def; struct cfg80211_bss *bss; - struct ath12k_vif *arvif = (struct ath12k_vif *)vif->drv_priv; + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); const u8 *rsnie = NULL; const u8 *wpaie = NULL; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(hw->wiphy); - if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) + info = ath12k_mac_get_link_bss_conf(arvif); + if (!info) { + ath12k_warn(ar->ab, "unable to access bss link conf for peer assoc crypto for vif %pM link %u\n", + vif->addr, arvif->link_id); return; + } - bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0, + if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))) + return; + + bss = cfg80211_get_bss(hw->wiphy, def.chan, info->bssid, NULL, 0, IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY); if (arvif->rsnie_present || arvif->wpaie_present) { @@ -1265,7 +1906,7 @@ static void ath12k_peer_assoc_h_crypto(struct ath12k *ar, ies->data, ies->len); rcu_read_unlock(); - cfg80211_put_bss(ar->hw->wiphy, bss); + cfg80211_put_bss(hw->wiphy, bss); } /* FIXME: base on RSN IE/WPA IE is a correct idea? 
*/ @@ -1290,28 +1931,38 @@ static void ath12k_peer_assoc_h_crypto(struct ath12k *ar, } static void ath12k_peer_assoc_h_rates(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, struct ath12k_wmi_peer_assoc_arg *arg) { - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; + struct ieee80211_link_sta *link_sta; struct cfg80211_chan_def def; const struct ieee80211_supported_band *sband; const struct ieee80211_rate *rates; + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); enum nl80211_band band; u32 ratemask; u8 rate; int i; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(hw->wiphy); - if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) + if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))) return; + link_sta = ath12k_mac_get_link_sta(arsta); + if (!link_sta) { + ath12k_warn(ar->ab, "unable to access link sta in peer assoc rates for sta %pM link %u\n", + sta->addr, arsta->link_id); + return; + } + band = def.chan->band; - sband = ar->hw->wiphy->bands[band]; - ratemask = sta->deflink.supp_rates[band]; + sband = hw->wiphy->bands[band]; + ratemask = link_sta->supp_rates[band]; ratemask &= arvif->bitrate_mask.control[band].legacy; rates = sband->bitrates; @@ -1352,12 +2003,14 @@ ath12k_peer_assoc_h_vht_masked(const u16 *vht_mcs_mask) } static void ath12k_peer_assoc_h_ht(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, struct ath12k_wmi_peer_assoc_arg *arg) { - const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); + const struct ieee80211_sta_ht_cap *ht_cap; + struct ieee80211_link_sta *link_sta; struct cfg80211_chan_def def; enum nl80211_band band; const u8 *ht_mcs_mask; @@ -1365,11 +2018,19 @@ static void ath12k_peer_assoc_h_ht(struct ath12k *ar, u8 max_nss; u32 stbc; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); - if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) + if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))) return; + link_sta = ath12k_mac_get_link_sta(arsta); + if (!link_sta) { + ath12k_warn(ar->ab, "unable to access link sta in peer assoc ht for sta %pM link %u\n", + sta->addr, arsta->link_id); + return; + } + + ht_cap = &link_sta->ht_cap; if (!ht_cap->ht_supported) return; @@ -1393,7 +2054,7 @@ static void ath12k_peer_assoc_h_ht(struct ath12k *ar, if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) arg->ldpc_flag = true; - if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) { + if (link_sta->bandwidth >= IEEE80211_STA_RX_BW_40) { arg->bw_40 = true; arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG; } @@ -1443,7 +2104,7 @@ static void ath12k_peer_assoc_h_ht(struct ath12k *ar, arg->peer_ht_rates.rates[i] = i; } else { arg->peer_ht_rates.num_rates = n; - arg->peer_nss = min(sta->deflink.rx_nss, max_nss); + arg->peer_nss = min(link_sta->rx_nss, max_nss); } ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", @@ -1513,12 +2174,14 @@ ath12k_peer_assoc_h_vht_limit(u16 tx_mcs_set, } static void ath12k_peer_assoc_h_vht(struct ath12k *ar, - struct 
ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, struct ath12k_wmi_peer_assoc_arg *arg) { - const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); + const struct ieee80211_sta_vht_cap *vht_cap; + struct ieee80211_link_sta *link_sta; struct cfg80211_chan_def def; enum nl80211_band band; const u16 *vht_mcs_mask; @@ -1527,9 +2190,19 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar, u8 max_nss, vht_mcs; int i; - if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))) return; + link_sta = ath12k_mac_get_link_sta(arsta); + if (!link_sta) { + ath12k_warn(ar->ab, "unable to access link sta in peer assoc vht for sta %pM link %u\n", + sta->addr, arsta->link_id); + return; + } + + vht_cap = &link_sta->vht_cap; if (!vht_cap->vht_supported) return; @@ -1562,10 +2235,10 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar, (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR + ampdu_factor)) - 1); - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80) arg->bw_80 = true; - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) arg->bw_160 = true; /* Calculate peer NSS capability from VHT capabilities if STA @@ -1579,7 +2252,7 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar, vht_mcs_mask[i]) max_nss = i + 1; } - arg->peer_nss = min(sta->deflink.rx_nss, max_nss); + arg->peer_nss = min(link_sta->rx_nss, max_nss); arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest); arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest); @@ -1602,23 +2275,44 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar, arg->tx_max_mcs_nss = 0xFF; ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n", - sta->addr, arg->peer_max_mpdu, arg->peer_flags); + arsta->addr, arg->peer_max_mpdu, arg->peer_flags); /* TODO: rxnss_override */ } static void ath12k_peer_assoc_h_he(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, struct ath12k_wmi_peer_assoc_arg *arg) { - const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); + const struct ieee80211_sta_he_cap *he_cap; + struct ieee80211_bss_conf *link_conf; + struct ieee80211_link_sta *link_sta; int i; - u8 ampdu_factor, rx_mcs_80, rx_mcs_160, max_nss; + u8 ampdu_factor, max_nss; + u8 rx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED; + u8 rx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED; u16 mcs_160_map, mcs_80_map; bool support_160; u16 v; + link_conf = ath12k_mac_get_link_bss_conf(arvif); + if (!link_conf) { + ath12k_warn(ar->ab, "unable to access bss link conf in peer assoc he for vif %pM link %u", + vif->addr, arvif->link_id); + return; + } + + link_sta = ath12k_mac_get_link_sta(arsta); + if (!link_sta) { + ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n", + sta->addr, arsta->link_id); + return; + } + + he_cap = &link_sta->he_cap; if (!he_cap->has_he) 
return; @@ -1656,13 +2350,13 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar, else max_nss = rx_mcs_80; - arg->peer_nss = min(sta->deflink.rx_nss, max_nss); + arg->peer_nss = min(link_sta->rx_nss, max_nss); memcpy(&arg->peer_he_cap_macinfo, he_cap->he_cap_elem.mac_cap_info, sizeof(he_cap->he_cap_elem.mac_cap_info)); memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info, sizeof(he_cap->he_cap_elem.phy_cap_info)); - arg->peer_he_ops = vif->bss_conf.he_oper.params; + arg->peer_he_ops = link_conf->he_oper.params; /* the top most byte is used to indicate BSS color info */ arg->peer_he_ops &= 0xffffff; @@ -1679,15 +2373,14 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar, * request, then use MAX_AMPDU_LEN_FACTOR as 16 to calculate max_ampdu * length. */ - ampdu_factor = (he_cap->he_cap_elem.mac_cap_info[3] & - IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) >> - IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK; + ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3], + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK); if (ampdu_factor) { - if (sta->deflink.vht_cap.vht_supported) + if (link_sta->vht_cap.vht_supported) arg->peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR + ampdu_factor)) - 1; - else if (sta->deflink.ht_cap.ht_supported) + else if (link_sta->ht_cap.ht_supported) arg->peer_max_mpdu = (1 << (IEEE80211_HE_HT_MAX_AMPDU_FACTOR + ampdu_factor)) - 1; } @@ -1728,7 +2421,7 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar, if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ) arg->twt_requester = true; - switch (sta->deflink.bandwidth) { + switch (link_sta->bandwidth) { case IEEE80211_STA_RX_BW_160: if (he_cap->he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) { @@ -1761,17 +2454,114 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar, } } -static void ath12k_peer_assoc_h_smps(struct ieee80211_sta *sta, +static void ath12k_peer_assoc_h_he_6ghz(struct ath12k *ar, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, + struct ath12k_wmi_peer_assoc_arg *arg) +{ + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); + const struct ieee80211_sta_he_cap *he_cap; + struct ieee80211_link_sta *link_sta; + struct cfg80211_chan_def def; + enum nl80211_band band; + u8 ampdu_factor, mpdu_density; + + if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))) + return; + + band = def.chan->band; + + link_sta = ath12k_mac_get_link_sta(arsta); + if (!link_sta) { + ath12k_warn(ar->ab, "unable to access link sta in peer assoc he 6ghz for sta %pM link %u\n", + sta->addr, arsta->link_id); + return; + } + + he_cap = &link_sta->he_cap; + + if (!arg->he_flag || band != NL80211_BAND_6GHZ || !link_sta->he_6ghz_capa.capa) + return; + + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40) + arg->bw_40 = true; + + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80) + arg->bw_80 = true; + + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) + arg->bw_160 = true; + + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_320) + arg->bw_320 = true; + + arg->peer_he_caps_6ghz = le16_to_cpu(link_sta->he_6ghz_capa.capa); + + mpdu_density = u32_get_bits(arg->peer_he_caps_6ghz, + IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START); + arg->peer_mpdu_density = ath12k_parse_mpdudensity(mpdu_density); + + /* From IEEE Std 802.11ax-2021 - Section 10.12.2: An HE STA shall be capable of + * receiving A-MPDU where the A-MPDU pre-EOF padding length is up to the 
value + * indicated by the Maximum A-MPDU Length Exponent Extension field in the HE + * Capabilities element and the Maximum A-MPDU Length Exponent field in HE 6 GHz + * Band Capabilities element in the 6 GHz band. + * + * Here, we are extracting the Max A-MPDU Exponent Extension from HE caps and + * factor is the Maximum A-MPDU Length Exponent from HE 6 GHZ Band capability. + */ + ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3], + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) + + u32_get_bits(arg->peer_he_caps_6ghz, + IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); + + arg->peer_max_mpdu = (1u << (IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR + + ampdu_factor)) - 1; +} + +static int ath12k_get_smps_from_capa(const struct ieee80211_sta_ht_cap *ht_cap, + const struct ieee80211_he_6ghz_capa *he_6ghz_capa, + int *smps) +{ + if (ht_cap->ht_supported) + *smps = u16_get_bits(ht_cap->cap, IEEE80211_HT_CAP_SM_PS); + else + *smps = le16_get_bits(he_6ghz_capa->capa, + IEEE80211_HE_6GHZ_CAP_SM_PS); + + if (*smps >= ARRAY_SIZE(ath12k_smps_map)) + return -EINVAL; + + return 0; +} + +static void ath12k_peer_assoc_h_smps(struct ath12k_link_sta *arsta, struct ath12k_wmi_peer_assoc_arg *arg) { - const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); + const struct ieee80211_he_6ghz_capa *he_6ghz_capa; + struct ath12k_link_vif *arvif = arsta->arvif; + const struct ieee80211_sta_ht_cap *ht_cap; + struct ieee80211_link_sta *link_sta; + struct ath12k *ar = arvif->ar; int smps; - if (!ht_cap->ht_supported) + link_sta = ath12k_mac_get_link_sta(arsta); + if (!link_sta) { + ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n", + sta->addr, arsta->link_id); return; + } - smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS; - smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; + he_6ghz_capa = &link_sta->he_6ghz_capa; + ht_cap = &link_sta->ht_cap; + + if (!ht_cap->ht_supported && !he_6ghz_capa->capa) + return; + + if (ath12k_get_smps_from_capa(ht_cap, he_6ghz_capa, &smps)) + return; switch (smps) { case WLAN_HT_CAP_SM_PS_STATIC: @@ -1789,13 +2579,13 @@ static void ath12k_peer_assoc_h_smps(struct ieee80211_sta *sta, } static void ath12k_peer_assoc_h_qos(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, struct ath12k_wmi_peer_assoc_arg *arg) { - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); - switch (arvif->vdev_type) { + switch (arvif->ahvif->vdev_type) { case WMI_VDEV_TYPE_AP: if (sta->wme) { /* TODO: Check WME vs QoS */ @@ -1821,19 +2611,20 @@ static void ath12k_peer_assoc_h_qos(struct ath12k *ar, } ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM qos %d\n", - sta->addr, arg->qos_flag); + arsta->addr, arg->qos_flag); } static int ath12k_peer_assoc_qos_ap(struct ath12k *ar, - struct ath12k_vif *arvif, - struct ieee80211_sta *sta) + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta) { + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); struct ath12k_wmi_ap_ps_arg arg; u32 max_sp; u32 uapsd; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); arg.vdev_id = arvif->vdev_id; @@ -1860,26 +2651,26 @@ static int ath12k_peer_assoc_qos_ap(struct ath12k *ar, arg.param = WMI_AP_PS_PEER_PARAM_UAPSD; arg.value = uapsd; - ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg); + ret = 
ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg); if (ret) goto err; arg.param = WMI_AP_PS_PEER_PARAM_MAX_SP; arg.value = max_sp; - ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg); + ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg); if (ret) goto err; /* TODO: revisit during testing */ arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE; arg.value = DISABLE_SIFS_RESPONSE_TRIGGER; - ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg); + ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg); if (ret) goto err; arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD; arg.value = DISABLE_SIFS_RESPONSE_TRIGGER; - ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg); + ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg); if (ret) goto err; @@ -1891,17 +2682,17 @@ err: return ret; } -static bool ath12k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta) +static bool ath12k_mac_sta_has_ofdm_only(struct ieee80211_link_sta *sta) { - return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >> + return sta->supp_rates[NL80211_BAND_2GHZ] >> ATH12K_MAC_FIRST_OFDM_RATE_IDX; } static enum wmi_phy_mode ath12k_mac_get_phymode_vht(struct ath12k *ar, - struct ieee80211_sta *sta) + struct ieee80211_link_sta *link_sta) { - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) { - switch (sta->deflink.vht_cap.cap & + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) { + switch (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: return MODE_11AC_VHT160; @@ -1913,125 +2704,137 @@ static enum wmi_phy_mode ath12k_mac_get_phymode_vht(struct ath12k *ar, } } - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80) return MODE_11AC_VHT80; - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40) return MODE_11AC_VHT40; - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20) return MODE_11AC_VHT20; return MODE_UNKNOWN; } static enum wmi_phy_mode ath12k_mac_get_phymode_he(struct ath12k *ar, - struct ieee80211_sta *sta) + struct ieee80211_link_sta *link_sta) { - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) { - if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] & + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) { + if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) return MODE_11AX_HE160; - else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] & + else if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) return MODE_11AX_HE80_80; /* not sure if this is a valid case? 
*/ return MODE_11AX_HE160; } - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80) return MODE_11AX_HE80; - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40) return MODE_11AX_HE40; - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20) return MODE_11AX_HE20; return MODE_UNKNOWN; } static enum wmi_phy_mode ath12k_mac_get_phymode_eht(struct ath12k *ar, - struct ieee80211_sta *sta) + struct ieee80211_link_sta *link_sta) { - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320) - if (sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[0] & + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_320) + if (link_sta->eht_cap.eht_cap_elem.phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ) return MODE_11BE_EHT320; - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) { - if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] & + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) { + if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) return MODE_11BE_EHT160; - if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] & + if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) return MODE_11BE_EHT80_80; ath12k_warn(ar->ab, "invalid EHT PHY capability info for 160 Mhz: %d\n", - sta->deflink.he_cap.he_cap_elem.phy_cap_info[0]); + link_sta->he_cap.he_cap_elem.phy_cap_info[0]); return MODE_11BE_EHT160; } - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80) return MODE_11BE_EHT80; - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40) return MODE_11BE_EHT40; - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20) return MODE_11BE_EHT20; return MODE_UNKNOWN; } static void ath12k_peer_assoc_h_phymode(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, struct ath12k_wmi_peer_assoc_arg *arg) { - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ieee80211_link_sta *link_sta; struct cfg80211_chan_def def; enum nl80211_band band; const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; enum wmi_phy_mode phymode = MODE_UNKNOWN; - if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); + + if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))) return; band = def.chan->band; ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; + link_sta = ath12k_mac_get_link_sta(arsta); + if (!link_sta) { + ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n", + sta->addr, arsta->link_id); + return; + } + switch (band) { case NL80211_BAND_2GHZ: - if (sta->deflink.eht_cap.has_eht) { - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) + if (link_sta->eht_cap.has_eht) { + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40) phymode = MODE_11BE_EHT40_2G; else phymode = MODE_11BE_EHT20_2G; - } else if (sta->deflink.he_cap.has_he) { - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) + } else if (link_sta->he_cap.has_he) { + if 
(link_sta->bandwidth == IEEE80211_STA_RX_BW_80) phymode = MODE_11AX_HE80_2G; - else if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) + else if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40) phymode = MODE_11AX_HE40_2G; else phymode = MODE_11AX_HE20_2G; - } else if (sta->deflink.vht_cap.vht_supported && + } else if (link_sta->vht_cap.vht_supported && !ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) { - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40) phymode = MODE_11AC_VHT40; else phymode = MODE_11AC_VHT20; - } else if (sta->deflink.ht_cap.ht_supported && + } else if (link_sta->ht_cap.ht_supported && !ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) { - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40) phymode = MODE_11NG_HT40; else phymode = MODE_11NG_HT20; - } else if (ath12k_mac_sta_has_ofdm_only(sta)) { + } else if (ath12k_mac_sta_has_ofdm_only(link_sta)) { phymode = MODE_11G; } else { phymode = MODE_11B; @@ -2040,16 +2843,16 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar, case NL80211_BAND_5GHZ: case NL80211_BAND_6GHZ: /* Check EHT first */ - if (sta->deflink.eht_cap.has_eht) { - phymode = ath12k_mac_get_phymode_eht(ar, sta); - } else if (sta->deflink.he_cap.has_he) { - phymode = ath12k_mac_get_phymode_he(ar, sta); - } else if (sta->deflink.vht_cap.vht_supported && + if (link_sta->eht_cap.has_eht) { + phymode = ath12k_mac_get_phymode_eht(ar, link_sta); + } else if (link_sta->he_cap.has_he) { + phymode = ath12k_mac_get_phymode_he(ar, link_sta); + } else if (link_sta->vht_cap.vht_supported && !ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) { - phymode = ath12k_mac_get_phymode_vht(ar, sta); - } else if (sta->deflink.ht_cap.ht_supported && + phymode = ath12k_mac_get_phymode_vht(ar, link_sta); + } else if (link_sta->ht_cap.ht_supported && !ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) { - if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) + if (link_sta->bandwidth >= IEEE80211_STA_RX_BW_40) phymode = MODE_11NA_HT40; else phymode = MODE_11NA_HT20; @@ -2062,7 +2865,7 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar, } ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM phymode %s\n", - sta->addr, ath12k_mac_phymode_str(phymode)); + arsta->addr, ath12k_mac_phymode_str(phymode)); arg->peer_phymode = phymode; WARN_ON(phymode == MODE_UNKNOWN); @@ -2132,18 +2935,30 @@ static void ath12k_mac_set_eht_ppe_threshold(const u8 *ppe_thres, } static void ath12k_peer_assoc_h_eht(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, struct ath12k_wmi_peer_assoc_arg *arg) { - const struct ieee80211_sta_eht_cap *eht_cap = &sta->deflink.eht_cap; - const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); const struct ieee80211_eht_mcs_nss_supp_20mhz_only *bw_20; const struct ieee80211_eht_mcs_nss_supp_bw *bw; - struct ath12k_vif *arvif = (struct ath12k_vif *)vif->drv_priv; + const struct ieee80211_sta_eht_cap *eht_cap; + const struct ieee80211_sta_he_cap *he_cap; + struct ieee80211_link_sta *link_sta; u32 *rx_mcs, *tx_mcs; - if (!sta->deflink.he_cap.has_he || !eht_cap->has_eht) + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + link_sta = ath12k_mac_get_link_sta(arsta); + if (!link_sta) { + ath12k_warn(ar->ab, "unable to access link sta in peer assoc eht for sta %pM link %u\n", + sta->addr, arsta->link_id); + 
return; + } + + eht_cap = &link_sta->eht_cap; + he_cap = &link_sta->he_cap; + if (!he_cap->has_he || !eht_cap->has_eht) return; arg->eht_flag = true; @@ -2162,7 +2977,7 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar, rx_mcs = arg->peer_eht_rx_mcs_set; tx_mcs = arg->peer_eht_tx_mcs_set; - switch (sta->deflink.bandwidth) { + switch (link_sta->bandwidth) { case IEEE80211_STA_RX_BW_320: bw = &eht_cap->eht_mcs_nss_supp.bw._320; ath12k_mac_set_eht_mcs(bw->rx_tx_mcs9_max_nss, @@ -2214,85 +3029,175 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar, arg->punct_bitmap = ~arvif->punct_bitmap; } +static void ath12k_peer_assoc_h_mlo(struct ath12k_link_sta *arsta, + struct ath12k_wmi_peer_assoc_arg *arg) +{ + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); + struct peer_assoc_mlo_params *ml = &arg->ml; + struct ath12k_sta *ahsta = arsta->ahsta; + struct ath12k_link_sta *arsta_p; + struct ath12k_link_vif *arvif; + unsigned long links; + u8 link_id; + int i; + + if (!sta->mlo || ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) + return; + + ml->enabled = true; + ml->assoc_link = arsta->is_assoc_link; + + /* For now considering the primary umac based on assoc link */ + ml->primary_umac = arsta->is_assoc_link; + ml->peer_id_valid = true; + ml->logical_link_idx_valid = true; + + ether_addr_copy(ml->mld_addr, sta->addr); + ml->logical_link_idx = arsta->link_idx; + ml->ml_peer_id = ahsta->ml_peer_id; + ml->ieee_link_id = arsta->link_id; + ml->num_partner_links = 0; + links = ahsta->links_map; + + rcu_read_lock(); + + i = 0; + + for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { + if (i >= ATH12K_WMI_MLO_MAX_LINKS) + break; + + arsta_p = rcu_dereference(ahsta->link[link_id]); + arvif = rcu_dereference(ahsta->ahvif->link[link_id]); + + if (arsta_p == arsta) + continue; + + if (!arvif->is_started) + continue; + + ml->partner_info[i].vdev_id = arvif->vdev_id; + ml->partner_info[i].hw_link_id = arvif->ar->pdev->hw_link_id; + ml->partner_info[i].assoc_link = arsta_p->is_assoc_link; + ml->partner_info[i].primary_umac = arsta_p->is_assoc_link; + ml->partner_info[i].logical_link_idx_valid = true; + ml->partner_info[i].logical_link_idx = arsta_p->link_idx; + ml->num_partner_links++; + + i++; + } + + rcu_read_unlock(); +} + static void ath12k_peer_assoc_prepare(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, struct ath12k_wmi_peer_assoc_arg *arg, bool reassoc) { - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); memset(arg, 0, sizeof(*arg)); reinit_completion(&ar->peer_assoc_done); arg->peer_new_assoc = !reassoc; - ath12k_peer_assoc_h_basic(ar, vif, sta, arg); - ath12k_peer_assoc_h_crypto(ar, vif, sta, arg); - ath12k_peer_assoc_h_rates(ar, vif, sta, arg); - ath12k_peer_assoc_h_ht(ar, vif, sta, arg); - ath12k_peer_assoc_h_vht(ar, vif, sta, arg); - ath12k_peer_assoc_h_he(ar, vif, sta, arg); - ath12k_peer_assoc_h_eht(ar, vif, sta, arg); - ath12k_peer_assoc_h_qos(ar, vif, sta, arg); - ath12k_peer_assoc_h_phymode(ar, vif, sta, arg); - ath12k_peer_assoc_h_smps(sta, arg); + ath12k_peer_assoc_h_basic(ar, arvif, arsta, arg); + ath12k_peer_assoc_h_crypto(ar, arvif, arsta, arg); + ath12k_peer_assoc_h_rates(ar, arvif, arsta, arg); + ath12k_peer_assoc_h_ht(ar, arvif, arsta, arg); + ath12k_peer_assoc_h_vht(ar, arvif, arsta, arg); + ath12k_peer_assoc_h_he(ar, arvif, arsta, arg); + ath12k_peer_assoc_h_he_6ghz(ar, arvif, arsta, arg); + 
ath12k_peer_assoc_h_eht(ar, arvif, arsta, arg); + ath12k_peer_assoc_h_qos(ar, arvif, arsta, arg); + ath12k_peer_assoc_h_phymode(ar, arvif, arsta, arg); + ath12k_peer_assoc_h_smps(arsta, arg); + ath12k_peer_assoc_h_mlo(arsta, arg); /* TODO: amsdu_disable req? */ } -static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_vif *arvif, +static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_link_vif *arvif, const u8 *addr, - const struct ieee80211_sta_ht_cap *ht_cap) + const struct ieee80211_sta_ht_cap *ht_cap, + const struct ieee80211_he_6ghz_capa *he_6ghz_capa) { - int smps; + int smps, ret = 0; - if (!ht_cap->ht_supported) + if (!ht_cap->ht_supported && !he_6ghz_capa) return 0; - smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS; - smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; - - if (smps >= ARRAY_SIZE(ath12k_smps_map)) - return -EINVAL; + ret = ath12k_get_smps_from_capa(ht_cap, he_6ghz_capa, &smps); + if (ret < 0) + return ret; return ath12k_wmi_set_peer_param(ar, addr, arvif->vdev_id, WMI_PEER_MIMO_PS_STATE, ath12k_smps_map[smps]); } -static void ath12k_bss_assoc(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, +static void ath12k_bss_assoc(struct ath12k *ar, + struct ath12k_link_vif *arvif, struct ieee80211_bss_conf *bss_conf) { - struct ath12k *ar = hw->priv; - struct ath12k_vif *arvif = (void *)vif->drv_priv; - struct ath12k_wmi_peer_assoc_arg peer_arg; + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); + struct ath12k_wmi_vdev_up_params params = {}; + struct ieee80211_link_sta *link_sta; + u8 link_id = bss_conf->link_id; + struct ath12k_link_sta *arsta; struct ieee80211_sta *ap_sta; + struct ath12k_sta *ahsta; struct ath12k_peer *peer; bool is_auth = false; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); - ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n", - arvif->vdev_id, arvif->bssid, arvif->aid); + struct ath12k_wmi_peer_assoc_arg *peer_arg __free(kfree) = + kzalloc(sizeof(*peer_arg), GFP_KERNEL); + if (!peer_arg) + return; + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, + "mac vdev %i link id %u assoc bssid %pM aid %d\n", + arvif->vdev_id, link_id, arvif->bssid, ahvif->aid); rcu_read_lock(); - ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); + /* During ML connection, cfg.ap_addr has the MLD address. For + * non-ML connection, it has the BSSID. 
+ */ + ap_sta = ieee80211_find_sta(vif, vif->cfg.ap_addr); if (!ap_sta) { ath12k_warn(ar->ab, "failed to find station entry for bss %pM vdev %i\n", - bss_conf->bssid, arvif->vdev_id); + vif->cfg.ap_addr, arvif->vdev_id); + rcu_read_unlock(); + return; + } + + ahsta = ath12k_sta_to_ahsta(ap_sta); + + arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy, + ahsta->link[link_id]); + if (WARN_ON(!arsta)) { + rcu_read_unlock(); + return; + } + + link_sta = ath12k_mac_get_link_sta(arsta); + if (WARN_ON(!link_sta)) { rcu_read_unlock(); return; } - ath12k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg, false); + ath12k_peer_assoc_prepare(ar, arvif, arsta, peer_arg, false); rcu_read_unlock(); - ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg); + ret = ath12k_wmi_send_peer_assoc_cmd(ar, peer_arg); if (ret) { ath12k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n", bss_conf->bssid, arvif->vdev_id, ret); @@ -2306,7 +3211,7 @@ static void ath12k_bss_assoc(struct ieee80211_hw *hw, } ret = ath12k_setup_peer_smps(ar, arvif, bss_conf->bssid, - &ap_sta->deflink.ht_cap); + &link_sta->ht_cap, &link_sta->he_6ghz_capa); if (ret) { ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n", arvif->vdev_id, ret); @@ -2315,10 +3220,13 @@ static void ath12k_bss_assoc(struct ieee80211_hw *hw, WARN_ON(arvif->is_up); - arvif->aid = vif->cfg.aid; + ahvif->aid = vif->cfg.aid; ether_addr_copy(arvif->bssid, bss_conf->bssid); - ret = ath12k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid); + params.vdev_id = arvif->vdev_id; + params.aid = ahvif->aid; + params.bssid = arvif->bssid; + ret = ath12k_wmi_vdev_up(ar, ¶ms); if (ret) { ath12k_warn(ar->ab, "failed to set vdev %d up: %d\n", arvif->vdev_id, ret); @@ -2326,6 +3234,7 @@ static void ath12k_bss_assoc(struct ieee80211_hw *hw, } arvif->is_up = true; + arvif->rekey_data.enable_offload = false; ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d up (associated) bssid %pM aid %d\n", @@ -2356,14 +3265,12 @@ static void ath12k_bss_assoc(struct ieee80211_hw *hw, arvif->vdev_id, ret); } -static void ath12k_bss_disassoc(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +static void ath12k_bss_disassoc(struct ath12k *ar, + struct ath12k_link_vif *arvif) { - struct ath12k *ar = hw->priv; - struct ath12k_vif *arvif = (void *)vif->drv_priv; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n", arvif->vdev_id, arvif->bssid); @@ -2375,7 +3282,9 @@ static void ath12k_bss_disassoc(struct ieee80211_hw *hw, arvif->is_up = false; - /* TODO: cancel connection_loss_work */ + memset(&arvif->rekey_data, 0, sizeof(arvif->rekey_data)); + + cancel_delayed_work(&arvif->connection_loss_work); } static u32 ath12k_mac_get_rate_hw_value(int bitrate) @@ -2404,21 +3313,30 @@ static u32 ath12k_mac_get_rate_hw_value(int bitrate) } static void ath12k_recalculate_mgmt_rate(struct ath12k *ar, - struct ieee80211_vif *vif, + struct ath12k_link_vif *arvif, struct cfg80211_chan_def *def) { - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); const struct ieee80211_supported_band *sband; + struct ieee80211_bss_conf *bss_conf; u8 basic_rate_idx; int hw_rate_code; u32 vdev_param; u16 bitrate; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(hw->wiphy); - sband = ar->hw->wiphy->bands[def->chan->band]; - basic_rate_idx = 
ffs(vif->bss_conf.basic_rates) - 1; + bss_conf = ath12k_mac_get_link_bss_conf(arvif); + if (!bss_conf) { + ath12k_warn(ar->ab, "unable to access bss link conf in mgmt rate calc for vif %pM link %u\n", + vif->addr, arvif->link_id); + return; + } + + sband = hw->wiphy->bands[def->chan->band]; + basic_rate_idx = ffs(bss_conf->basic_rates) - 1; bitrate = sband->bitrates[basic_rate_idx].bitrate; hw_rate_code = ath12k_mac_get_rate_hw_value(bitrate); @@ -2440,10 +3358,21 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar, ath12k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret); } -static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif, +static int +ath12k_mac_op_change_vif_links(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u16 old_links, u16 new_links, + struct ieee80211_bss_conf *ol[IEEE80211_MLD_MAX_NUM_LINKS]) +{ + return 0; +} + +static int ath12k_mac_fils_discovery(struct ath12k_link_vif *arvif, struct ieee80211_bss_conf *info) { + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); struct ath12k *ar = arvif->ar; + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); struct sk_buff *tmpl; int ret; u32 interval; @@ -2452,7 +3381,7 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif, if (info->fils_discovery.max_interval) { interval = info->fils_discovery.max_interval; - tmpl = ieee80211_get_fils_discovery_tmpl(ar->hw, arvif->vif); + tmpl = ieee80211_get_fils_discovery_tmpl(hw, vif); if (tmpl) ret = ath12k_wmi_fils_discovery_tmpl(ar, arvif->vdev_id, tmpl); @@ -2460,8 +3389,7 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif, unsol_bcast_probe_resp_enabled = 1; interval = info->unsol_bcast_probe_resp_interval; - tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(ar->hw, - arvif->vif); + tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif); if (tmpl) ret = ath12k_wmi_probe_resp_tmpl(ar, arvif->vdev_id, tmpl); @@ -2486,13 +3414,109 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif, return ret; } -static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *info, - u64 changed) +static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u64 changed) +{ + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + unsigned long links = ahvif->links_map; + struct ieee80211_bss_conf *info; + struct ath12k_link_vif *arvif; + struct ath12k *ar; + u8 link_id; + + lockdep_assert_wiphy(hw->wiphy); + + if (changed & BSS_CHANGED_SSID && vif->type == NL80211_IFTYPE_AP) { + ahvif->u.ap.ssid_len = vif->cfg.ssid_len; + if (vif->cfg.ssid_len) + memcpy(ahvif->u.ap.ssid, vif->cfg.ssid, vif->cfg.ssid_len); + } + + if (changed & BSS_CHANGED_ASSOC) { + for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); + if (!arvif || !arvif->ar) + continue; + + ar = arvif->ar; + + if (vif->cfg.assoc) { + info = ath12k_mac_get_link_bss_conf(arvif); + if (!info) + continue; + + ath12k_bss_assoc(ar, arvif, info); + } else { + ath12k_bss_disassoc(ar, arvif); + } + } + } +} + +static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif) { - struct ath12k *ar = hw->priv; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k *ar = arvif->ar; + struct ieee80211_vif *vif = arvif->ahvif->vif; + struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf; + enum wmi_sta_powersave_param param; + struct ieee80211_bss_conf *info; + enum wmi_sta_ps_mode psmode; + int ret; 
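/* Context for the powersave inactivity timeout picked below: when
 * dynamic_ps_timeout is 0 the code falls back to one beacon interval,
 * converted from 802.11 time units (1 TU = 1024 usec) to milliseconds.
 * A standalone sketch of that conversion, with a stub standing in for
 * ieee80211_tu_to_usec():
 */
#include <stdio.h>

static unsigned long sketch_tu_to_usec(unsigned long tu)
{
	return tu * 1024;	/* one 802.11 time unit is 1024 microseconds */
}

int main(void)
{
	unsigned long beacon_int = 100;	/* a common beacon interval, in TUs */

	/* ~102 ms; a non-zero value is needed because, per the comment in
	 * the code below, the firmware doesn't like an inactivity time of 0.
	 */
	printf("inactivity time: %lu ms\n",
	       sketch_tu_to_usec(beacon_int) / 1000);

	return 0;
}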
+ int timeout; + bool enable_ps; + + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + enable_ps = arvif->ahvif->ps; + if (enable_ps) { + psmode = WMI_STA_PS_MODE_ENABLED; + param = WMI_STA_PS_PARAM_INACTIVITY_TIME; + + timeout = conf->dynamic_ps_timeout; + if (timeout == 0) { + info = ath12k_mac_get_link_bss_conf(arvif); + if (!info) { + ath12k_warn(ar->ab, "unable to access bss link conf in setup ps for vif %pM link %u\n", + vif->addr, arvif->link_id); + return; + } + + /* firmware doesn't like 0 */ + timeout = ieee80211_tu_to_usec(info->beacon_int) / 1000; + } + + ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, + timeout); + if (ret) { + ath12k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n", + arvif->vdev_id, ret); + return; + } + } else { + psmode = WMI_STA_PS_MODE_DISABLED; + } + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d psmode %s\n", + arvif->vdev_id, psmode ? "enable" : "disable"); + + ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode); + if (ret) + ath12k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n", + psmode, arvif->vdev_id, ret); +} + +static void ath12k_mac_bss_info_changed(struct ath12k *ar, + struct ath12k_link_vif *arvif, + struct ieee80211_bss_conf *info, + u64 changed) +{ + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); + struct ieee80211_vif_cfg *vif_cfg = &vif->cfg; struct cfg80211_chan_def def; u32 param_id, param_value; enum nl80211_band band; @@ -2505,7 +3529,7 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, u8 rateidx; u32 rate; - mutex_lock(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); if (changed & BSS_CHANGED_BEACON_INT) { arvif->beacon_interval = info->beacon_int; @@ -2525,7 +3549,7 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_BEACON) { param_id = WMI_PDEV_PARAM_BEACON_TX_MODE; - param_value = WMI_BEACON_STAGGERED_MODE; + param_value = WMI_BEACON_BURST_MODE; ret = ath12k_wmi_pdev_set_param(ar, param_id, param_value, ar->pdev->pdev_id); if (ret) @@ -2533,7 +3557,7 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, arvif->vdev_id); else ath12k_dbg(ar->ab, ATH12K_DBG_MAC, - "Set staggered beacon mode for VDEV: %d\n", + "Set burst beacon mode for VDEV: %d\n", arvif->vdev_id); ret = ath12k_mac_setup_bcn_tmpl(arvif); @@ -2561,10 +3585,10 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_SSID && vif->type == NL80211_IFTYPE_AP) { - arvif->u.ap.ssid_len = vif->cfg.ssid_len; + ahvif->u.ap.ssid_len = vif->cfg.ssid_len; if (vif->cfg.ssid_len) - memcpy(arvif->u.ap.ssid, vif->cfg.ssid, vif->cfg.ssid_len); - arvif->u.ap.hidden_ssid = info->hidden_ssid; + memcpy(ahvif->u.ap.ssid, vif->cfg.ssid, vif->cfg.ssid_len); + ahvif->u.ap.hidden_ssid = info->hidden_ssid; } if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) @@ -2573,8 +3597,8 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_BEACON_ENABLED) { ath12k_control_beaconing(arvif, info); - if (arvif->is_up && vif->bss_conf.he_support && - vif->bss_conf.he_oper.params) { + if (arvif->is_up && info->he_support && + info->he_oper.params) { /* TODO: Extend to support 1024 BA Bitmap size */ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, WMI_VDEV_PARAM_BA_MODE, @@ -2585,7 +3609,7 @@ static void 
ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, arvif->vdev_id); param_id = WMI_VDEV_PARAM_HEOPS_0_31; - param_value = vif->bss_conf.he_oper.params; + param_value = info->he_oper.params; ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, param_value); ath12k_dbg(ar->ab, ATH12K_DBG_MAC, @@ -2661,9 +3685,9 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_ASSOC) { if (vif->cfg.assoc) - ath12k_bss_assoc(hw, vif, info); + ath12k_bss_assoc(ar, arvif, info); else - ath12k_bss_disassoc(hw, vif); + ath12k_bss_disassoc(ar, arvif); } if (changed & BSS_CHANGED_TXPOWER) { @@ -2675,14 +3699,14 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, } if (changed & BSS_CHANGED_MCAST_RATE && - !ath12k_mac_vif_chan(arvif->vif, &def)) { + !ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)) { band = def.chan->band; - mcast_rate = vif->bss_conf.mcast_rate[band]; + mcast_rate = info->mcast_rate[band]; if (mcast_rate > 0) rateidx = mcast_rate - 1; else - rateidx = ffs(vif->bss_conf.basic_rates) - 1; + rateidx = ffs(info->basic_rates) - 1; if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) rateidx += ATH12K_MAC_FIRST_OFDM_RATE_IDX; @@ -2719,8 +3743,8 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, } if (changed & BSS_CHANGED_BASIC_RATES && - !ath12k_mac_vif_chan(arvif->vif, &def)) - ath12k_recalculate_mgmt_rate(ar, vif, &def); + !ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)) + ath12k_recalculate_mgmt_rate(ar, arvif, &def); if (changed & BSS_CHANGED_TWT) { if (info->twt_requester || info->twt_responder) @@ -2761,18 +3785,226 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, } } - if (changed & BSS_CHANGED_FILS_DISCOVERY || - changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP) - ath12k_mac_fils_discovery(arvif, info); + ath12k_mac_fils_discovery(arvif, info); + + if (changed & BSS_CHANGED_PS && + ar->ab->hw_params->supports_sta_ps) { + ahvif->ps = vif_cfg->ps; + ath12k_mac_vif_setup_ps(arvif); + } +} + +static struct ath12k_vif_cache *ath12k_ahvif_get_link_cache(struct ath12k_vif *ahvif, + u8 link_id) +{ + if (!ahvif->cache[link_id]) { + ahvif->cache[link_id] = kzalloc(sizeof(*ahvif->cache[0]), GFP_KERNEL); + if (ahvif->cache[link_id]) + INIT_LIST_HEAD(&ahvif->cache[link_id]->key_conf.list); + } + + return ahvif->cache[link_id]; +} + +static void ath12k_ahvif_put_link_key_cache(struct ath12k_vif_cache *cache) +{ + struct ath12k_key_conf *key_conf, *tmp; + + if (!cache || list_empty(&cache->key_conf.list)) + return; + list_for_each_entry_safe(key_conf, tmp, &cache->key_conf.list, list) { + list_del(&key_conf->list); + kfree(key_conf); + } +} + +static void ath12k_ahvif_put_link_cache(struct ath12k_vif *ahvif, u8 link_id) +{ + if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS) + return; + + ath12k_ahvif_put_link_key_cache(ahvif->cache[link_id]); + kfree(ahvif->cache[link_id]); + ahvif->cache[link_id] = NULL; +} + +static void ath12k_mac_op_link_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u64 changed) +{ + struct ath12k *ar; + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_vif_cache *cache; + struct ath12k_link_vif *arvif; + u8 link_id = info->link_id; + + lockdep_assert_wiphy(hw->wiphy); + + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); + + /* if the vdev is not created on a certain radio, + * cache the info to be updated later on vdev creation + */ + + if (!arvif || !arvif->is_created) { + 
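/* The statements below OR the changed flags into a per-link cache so the
 * update can be replayed once the vdev is created. A minimal standalone
 * sketch of that accumulate-and-replay pattern; the types and names here
 * are illustrative assumptions, not driver API:
 */
#include <stdio.h>
#include <stdlib.h>

struct sketch_cache {
	unsigned long long bss_conf_changed;	/* accumulated BSS_CHANGED_* bits */
};

/* Allocate the cache lazily, mirroring ath12k_ahvif_get_link_cache() above. */
static struct sketch_cache *sketch_cache_get(struct sketch_cache **slot)
{
	if (!*slot)
		*slot = calloc(1, sizeof(**slot));
	return *slot;
}

int main(void)
{
	struct sketch_cache *cache = NULL;
	struct sketch_cache *c;

	c = sketch_cache_get(&cache);
	if (c)
		c->bss_conf_changed |= 0x1ULL;	/* e.g. one BSS_CHANGED_* bit */

	c = sketch_cache_get(&cache);
	if (c)
		c->bss_conf_changed |= 0x4ULL;	/* a later change, same link */

	/* at vdev creation the merged bits would be replayed, then cleared */
	printf("deferred changed bits: %#llx\n",
	       cache ? cache->bss_conf_changed : 0ULL);
	free(cache);

	return 0;
}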
cache = ath12k_ahvif_get_link_cache(ahvif, link_id); + if (!cache) + return; + + cache->bss_conf_changed |= changed; + + return; + } + + ar = arvif->ar; + + ath12k_mac_bss_info_changed(ar, arvif, info, changed); +} + +static struct ath12k_link_vif *ath12k_mac_assign_link_vif(struct ath12k_hw *ah, + struct ieee80211_vif *vif, + u8 link_id) +{ + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif; + int i; + + lockdep_assert_wiphy(ah->hw->wiphy); + + arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]); + if (arvif) + return arvif; + + if (!vif->valid_links) { + /* Use deflink for Non-ML VIFs and mark the link id as 0 + */ + link_id = 0; + arvif = &ahvif->deflink; + } else { + /* If this is the first link arvif being created for an ML VIF + * use the preallocated deflink memory except for scan arvifs + */ + if (!ahvif->links_map && link_id != ATH12K_DEFAULT_SCAN_LINK) { + arvif = &ahvif->deflink; + } else { + arvif = (struct ath12k_link_vif *) + kzalloc(sizeof(struct ath12k_link_vif), GFP_KERNEL); + if (!arvif) + return NULL; + } + } + + arvif->ahvif = ahvif; + arvif->link_id = link_id; + ahvif->links_map |= BIT(link_id); + + INIT_LIST_HEAD(&arvif->list); + INIT_DELAYED_WORK(&arvif->connection_loss_work, + ath12k_mac_vif_sta_connection_loss_work); - if (changed & BSS_CHANGED_EHT_PUNCTURING) - arvif->punct_bitmap = info->eht_puncturing; + for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { + arvif->bitrate_mask.control[i].legacy = 0xffffffff; + memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, + sizeof(arvif->bitrate_mask.control[i].ht_mcs)); + memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, + sizeof(arvif->bitrate_mask.control[i].vht_mcs)); + } - mutex_unlock(&ar->conf_mutex); + /* Allocate Default Queue now and reassign during actual vdev create */ + vif->cab_queue = ATH12K_HW_DEFAULT_QUEUE; + for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++) + vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE; + + vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; + + rcu_assign_pointer(ahvif->link[arvif->link_id], arvif); + ahvif->links_map |= BIT(link_id); + synchronize_rcu(); + return arvif; +} + +static void ath12k_mac_unassign_link_vif(struct ath12k_link_vif *arvif) +{ + struct ath12k_vif *ahvif = arvif->ahvif; + struct ath12k_hw *ah = ahvif->ah; + + lockdep_assert_wiphy(ah->hw->wiphy); + + rcu_assign_pointer(ahvif->link[arvif->link_id], NULL); + synchronize_rcu(); + ahvif->links_map &= ~BIT(arvif->link_id); + + if (arvif != &ahvif->deflink) + kfree(arvif); + else + memset(arvif, 0, sizeof(*arvif)); +} + +static void ath12k_mac_remove_link_interface(struct ieee80211_hw *hw, + struct ath12k_link_vif *arvif) +{ + struct ath12k_vif *ahvif = arvif->ahvif; + struct ath12k_hw *ah = hw->priv; + struct ath12k *ar = arvif->ar; + int ret; + + lockdep_assert_wiphy(ah->hw->wiphy); + + cancel_delayed_work_sync(&arvif->connection_loss_work); + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac remove link interface (vdev %d link id %d)", + arvif->vdev_id, arvif->link_id); + + if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) { + ret = ath12k_peer_delete(ar, arvif->vdev_id, arvif->bssid); + if (ret) + ath12k_warn(ar->ab, "failed to submit AP self-peer removal on vdev %d link id %d: %d", + arvif->vdev_id, arvif->link_id, ret); + } + ath12k_mac_vdev_delete(ar, arvif); +} + +static struct ath12k* +ath12k_mac_select_scan_device(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u32 center_freq) +{ + struct ath12k_hw *ah = hw->priv; + enum nl80211_band band; + struct ath12k *ar; + 
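/* ath12k_mac_select_scan_device() continues below: with more than one
 * radio, the first scan channel's center frequency is banded and the first
 * radio owning channels in that band wins. A standalone sketch of the
 * banding step; the MHz cut-offs mirror the ATH12K_MIN_5G_FREQ and
 * ATH12K_MIN_6G_FREQ checks below, but the exact values used here are
 * assumptions:
 */
#include <stdio.h>

enum sketch_band { SKETCH_2GHZ, SKETCH_5GHZ, SKETCH_6GHZ };

static enum sketch_band sketch_band_for_freq(unsigned int mhz)
{
	if (mhz < 4150)		/* assumed ATH12K_MIN_5G_FREQ */
		return SKETCH_2GHZ;
	if (mhz < 5925)		/* assumed ATH12K_MIN_6G_FREQ */
		return SKETCH_5GHZ;
	return SKETCH_6GHZ;	/* 6 GHz sits above both cut-offs */
}

int main(void)
{
	/* channel 6, channel 36 and a 6 GHz channel, by center frequency */
	printf("%d %d %d\n", sketch_band_for_freq(2437),
	       sketch_band_for_freq(5180), sketch_band_for_freq(6115));

	return 0;	/* prints: 0 1 2 */
}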
int i; + + if (ah->num_radio == 1) + return ah->radio; + + /* Currently mac80211 supports splitting scan requests into + * multiple scan requests per band. + * Loop through the first channel and determine the scan radio. + * TODO: There could be 5 GHz low/high channels; in that case, + * split the hw request and perform multiple scans. + */ + + if (center_freq < ATH12K_MIN_5G_FREQ) + band = NL80211_BAND_2GHZ; + else if (center_freq < ATH12K_MIN_6G_FREQ) + band = NL80211_BAND_5GHZ; + else + band = NL80211_BAND_6GHZ; + + for_each_ar(ah, ar, i) { + /* TODO: 5 GHz low/high split changes */ + if (ar->mac.sbands[band].channels) + return ar; + } + + return NULL; } void __ath12k_mac_scan_finish(struct ath12k *ar) { + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); + lockdep_assert_held(&ar->data_lock); switch (ar->scan.state) { @@ -2780,23 +4012,13 @@ void __ath12k_mac_scan_finish(struct ath12k *ar) break; case ATH12K_SCAN_RUNNING: case ATH12K_SCAN_ABORTING: - if (!ar->scan.is_roc) { - struct cfg80211_scan_info info = { - .aborted = (ar->scan.state == - ATH12K_SCAN_ABORTING), - }; - - ieee80211_scan_completed(ar->hw, &info); - } else if (ar->scan.roc_notify) { - ieee80211_remain_on_channel_expired(ar->hw); - } + if (ar->scan.is_roc && ar->scan.roc_notify) + ieee80211_remain_on_channel_expired(hw); fallthrough; case ATH12K_SCAN_STARTING: - ar->scan.state = ATH12K_SCAN_IDLE; - ar->scan_channel = NULL; - ar->scan.roc_freq = 0; cancel_delayed_work(&ar->scan.timeout); complete(&ar->scan.completed); + wiphy_work_queue(ar->ah->hw->wiphy, &ar->scan.vdev_clean_wk); break; } } @@ -2816,7 +4038,7 @@ static int ath12k_scan_stop(struct ath12k *ar) }; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); /* TODO: Fill other STOP Params */ arg.pdev_id = ar->pdev->pdev_id; @@ -2837,15 +4059,15 @@ static int ath12k_scan_stop(struct ath12k *ar) } out: - /* Scan state should be updated upon scan completion but in case - * firmware fails to deliver the event (for whatever reason) it is - * desired to clean up scan state anyway. Firmware may have just - * dropped the scan completion event delivery due to transport pipe - * being overflown with data and/or it can recover on its own before - * next scan request is submitted. + /* Scan state should be updated in the scan completion worker, but in + * case firmware fails to deliver the event (for whatever reason) + * it is desired to clean up the scan state anyway. Firmware may have + * just dropped the scan completion event delivery due to the transport + * pipe being overflowed with data, and/or it can recover on its own + * before the next scan request is submitted.
*/ spin_lock_bh(&ar->data_lock); - if (ar->scan.state != ATH12K_SCAN_IDLE) + if (ret) __ath12k_mac_scan_finish(ar); spin_unlock_bh(&ar->data_lock); return ret; } @@ -2856,7 +4078,7 @@ static void ath12k_scan_abort(struct ath12k *ar) { int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); spin_lock_bh(&ar->data_lock); @@ -2891,9 +4113,56 @@ static void ath12k_scan_timeout_work(struct work_struct *work) struct ath12k *ar = container_of(work, struct ath12k, scan.timeout.work); - mutex_lock(&ar->conf_mutex); + wiphy_lock(ath12k_ar_to_hw(ar)->wiphy); ath12k_scan_abort(ar); - mutex_unlock(&ar->conf_mutex); + wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); +} + +static void ath12k_scan_vdev_clean_work(struct wiphy *wiphy, struct wiphy_work *work) +{ + struct ath12k *ar = container_of(work, struct ath12k, + scan.vdev_clean_wk); + struct ath12k_hw *ah = ar->ah; + struct ath12k_link_vif *arvif; + + lockdep_assert_wiphy(wiphy); + + arvif = ar->scan.arvif; + + /* The scan vdev has already been deleted. This can occur when a + * new scan request is made on the same vif with a different + * frequency, causing the scan arvif to move from one radio to + * another. Or the scan was aborted and the arvif was already + * deleted via remove interface. Alternatively, if the scan vdev is + * not being used as an actual vdev, then do not delete it. + */ + if (!arvif || arvif->is_started) + goto work_complete; + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac clean scan vdev (link id %u)", + arvif->link_id); + + ath12k_mac_remove_link_interface(ah->hw, arvif); + ath12k_mac_unassign_link_vif(arvif); + +work_complete: + spin_lock_bh(&ar->data_lock); + ar->scan.arvif = NULL; + if (!ar->scan.is_roc) { + struct cfg80211_scan_info info = { + .aborted = ((ar->scan.state == + ATH12K_SCAN_ABORTING) || + (ar->scan.state == + ATH12K_SCAN_STARTING)), + }; + + ieee80211_scan_completed(ar->ah->hw, &info); + } + + ar->scan.state = ATH12K_SCAN_IDLE; + ar->scan_channel = NULL; + ar->scan.roc_freq = 0; + spin_unlock_bh(&ar->data_lock); } static int ath12k_start_scan(struct ath12k *ar, @@ -2901,7 +4170,7 @@ static int ath12k_start_scan(struct ath12k *ar, { int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); ret = ath12k_wmi_send_scan_start_cmd(ar, arg); if (ret) @@ -2930,18 +4199,104 @@ static int ath12k_start_scan(struct ath12k *ar, return 0; } +static u8 +ath12k_mac_find_link_id_by_ar(struct ath12k_vif *ahvif, struct ath12k *ar) +{ + struct ath12k_link_vif *arvif; + struct ath12k_hw *ah = ahvif->ah; + unsigned long links = ahvif->links_map; + u8 link_id; + + lockdep_assert_wiphy(ah->hw->wiphy); + + for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { + arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]); + + if (!arvif || !arvif->is_created) + continue; + + if (ar == arvif->ar) + return link_id; + } + + /* The input ar is not assigned to any of the links of the ML VIF; use + * the scan link (15) for scan vdev creation.
+ */ + return ATH12K_DEFAULT_SCAN_LINK; +} + static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_scan_request *hw_req) { - struct ath12k *ar = hw->priv; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k *ar; + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif; struct cfg80211_scan_request *req = &hw_req->req; - struct ath12k_wmi_scan_req_arg arg = {}; + struct ath12k_wmi_scan_req_arg *arg = NULL; + u8 link_id; int ret; int i; + bool create = true; + + lockdep_assert_wiphy(hw->wiphy); + + arvif = &ahvif->deflink; + + /* Since the targeted scan device could depend on the frequency + * requested in the hw_req, select the corresponding radio. + */ + ar = ath12k_mac_select_scan_device(hw, vif, hw_req->req.channels[0]->center_freq); + if (!ar) + return -EINVAL; + + /* Check if any of the links of the ML VIF is already started on the + * radio (ar) corresponding to the given scan frequency and use it; + * if not, use the scan link (link 15) for scan purposes. + */ + link_id = ath12k_mac_find_link_id_by_ar(ahvif, ar); + arvif = ath12k_mac_assign_link_vif(ah, vif, link_id); + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac link ID %d selected for scan", + arvif->link_id); + + /* If the vif is already assigned to a specific vdev of an ar, + * check whether it is already started; a vdev which is started + * is not allowed to switch to a new radio. + * If the vdev is not started, but was earlier created on a + * different ar, delete that vdev and create a new one. We don't + * delete at scan stop, as an optimization to avoid redundant + * delete-create cycles of vdevs for the same ar, in case the + * request is always on the same band for the vif. + */ + if (arvif->is_created) { + if (WARN_ON(!arvif->ar)) + return -EINVAL; - mutex_lock(&ar->conf_mutex); + if (ar != arvif->ar && arvif->is_started) + return -EINVAL; + + if (ar != arvif->ar) { + ath12k_mac_remove_link_interface(hw, arvif); + ath12k_mac_unassign_link_vif(arvif); + } else { + create = false; + } + } + + if (create) { + /* The previous arvif would've been cleared in the radio switch + * block above; assign arvif again for create.
+ */ + arvif = ath12k_mac_assign_link_vif(ah, vif, link_id); + + ret = ath12k_mac_vdev_create(ar, arvif); + if (ret) { + ath12k_warn(ar->ab, "unable to create scan vdev %d\n", ret); + return -EINVAL; + } + } spin_lock_bh(&ar->data_lock); switch (ar->scan.state) { @@ -2950,7 +4305,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw, reinit_completion(&ar->scan.completed); ar->scan.state = ATH12K_SCAN_STARTING; ar->scan.is_roc = false; - ar->scan.vdev_id = arvif->vdev_id; + ar->scan.arvif = arvif; ret = 0; break; case ATH12K_SCAN_STARTING: @@ -2964,42 +4319,47 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw, if (ret) goto exit; - ath12k_wmi_start_scan_init(ar, &arg); - arg.vdev_id = arvif->vdev_id; - arg.scan_id = ATH12K_SCAN_ID; + arg = kzalloc(sizeof(*arg), GFP_KERNEL); + if (!arg) { + ret = -ENOMEM; + goto exit; + } + + ath12k_wmi_start_scan_init(ar, arg); + arg->vdev_id = arvif->vdev_id; + arg->scan_id = ATH12K_SCAN_ID; if (req->ie_len) { - arg.extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL); - if (!arg.extraie.ptr) { + arg->extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL); + if (!arg->extraie.ptr) { ret = -ENOMEM; goto exit; } - arg.extraie.len = req->ie_len; + arg->extraie.len = req->ie_len; } if (req->n_ssids) { - arg.num_ssids = req->n_ssids; - for (i = 0; i < arg.num_ssids; i++) - arg.ssid[i] = req->ssids[i]; + arg->num_ssids = req->n_ssids; + for (i = 0; i < arg->num_ssids; i++) + arg->ssid[i] = req->ssids[i]; } else { - arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE; + arg->scan_f_passive = 1; } if (req->n_channels) { - arg.num_chan = req->n_channels; - arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list), - GFP_KERNEL); - - if (!arg.chan_list) { + arg->num_chan = req->n_channels; + arg->chan_list = kcalloc(arg->num_chan, sizeof(*arg->chan_list), + GFP_KERNEL); + if (!arg->chan_list) { ret = -ENOMEM; goto exit; } - for (i = 0; i < arg.num_chan; i++) - arg.chan_list[i] = req->channels[i]->center_freq; + for (i = 0; i < arg->num_chan; i++) + arg->chan_list[i] = req->channels[i]->center_freq; } - ret = ath12k_start_scan(ar, &arg); + ret = ath12k_start_scan(ar, arg); if (ret) { ath12k_warn(ar->ab, "failed to start hw scan: %d\n", ret); spin_lock_bh(&ar->data_lock); @@ -3007,34 +4367,52 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw, spin_unlock_bh(&ar->data_lock); } + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac scan started"); + + /* As per the cfg80211/mac80211 scan design, only one scan is + * allowed at a time. Hence the last_scan link id is used for + * tracking the link on which the scan is being done for this vif.
+ */ + ahvif->last_scan_link = arvif->link_id; + /* Add a margin to account for event/command processing */ - ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, - msecs_to_jiffies(arg.max_scan_time + + ieee80211_queue_delayed_work(ath12k_ar_to_hw(ar), &ar->scan.timeout, + msecs_to_jiffies(arg->max_scan_time + ATH12K_MAC_SCAN_TIMEOUT_MSECS)); exit: - kfree(arg.chan_list); - - if (req->ie_len) - kfree(arg.extraie.ptr); + if (arg) { + kfree(arg->chan_list); + kfree(arg->extraie.ptr); + kfree(arg); + } - mutex_unlock(&ar->conf_mutex); return ret; } static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - struct ath12k *ar = hw->priv; + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + u16 link_id = ahvif->last_scan_link; + struct ath12k_link_vif *arvif; + struct ath12k *ar; + + lockdep_assert_wiphy(hw->wiphy); + + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); + if (!arvif || arvif->is_started) + return; + + ar = arvif->ar; - mutex_lock(&ar->conf_mutex); ath12k_scan_abort(ar); - mutex_unlock(&ar->conf_mutex); cancel_delayed_work_sync(&ar->scan.timeout); } -static int ath12k_install_key(struct ath12k_vif *arvif, +static int ath12k_install_key(struct ath12k_link_vif *arvif, struct ieee80211_key_conf *key, enum set_key_cmd cmd, const u8 *macaddr, u32 flags) @@ -3049,8 +4427,10 @@ static int ath12k_install_key(struct ath12k_vif *arvif, .key_flags = flags, .macaddr = macaddr, }; + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); - lockdep_assert_held(&arvif->ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); reinit_completion(&ar->install_key_done); @@ -3101,13 +4481,13 @@ install: if (!wait_for_completion_timeout(&ar->install_key_done, 1 * HZ)) return -ETIMEDOUT; - if (ether_addr_equal(macaddr, arvif->vif->addr)) - arvif->key_cipher = key->cipher; + if (ether_addr_equal(macaddr, vif->addr)) + ahvif->key_cipher = key->cipher; return ar->install_key_status ? 
-EINVAL : 0; } -static int ath12k_clear_peer_keys(struct ath12k_vif *arvif, +static int ath12k_clear_peer_keys(struct ath12k_link_vif *arvif, const u8 *addr) { struct ath12k *ar = arvif->ar; @@ -3118,7 +4498,7 @@ static int ath12k_clear_peer_keys(struct ath12k_vif *arvif, int i; u32 flags = 0; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); spin_lock_bh(&ab->base_lock); peer = ath12k_peer_find(ab, arvif->vdev_id, addr); @@ -3149,45 +4529,48 @@ static int ath12k_clear_peer_keys(struct ath12k_vif *arvif, return first_errno; } -static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, - struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_key_conf *key) +static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, + struct ieee80211_key_conf *key) { - struct ath12k *ar = hw->priv; + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); + struct ieee80211_bss_conf *link_conf; + struct ieee80211_sta *sta = NULL; struct ath12k_base *ab = ar->ab; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct ath12k_peer *peer; - struct ath12k_sta *arsta; + struct ath12k_sta *ahsta; const u8 *peer_addr; - int ret = 0; + int ret; u32 flags = 0; - /* BIP needs to be done in software */ - if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || - key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || - key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 || - key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256) - return 1; + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); - if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) - return 1; + if (arsta) + sta = ath12k_ahsta_to_sta(arsta->ahsta); - if (key->keyidx > WMI_MAX_KEY_INDEX) - return -ENOSPC; + if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags)) + return 1; - mutex_lock(&ar->conf_mutex); + link_conf = ath12k_mac_get_link_bss_conf(arvif); + if (!link_conf) { + ath12k_warn(ab, "unable to access bss link conf in set key for vif %pM link %u\n", + vif->addr, arvif->link_id); + return -ENOLINK; + } if (sta) - peer_addr = sta->addr; - else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) - peer_addr = vif->bss_conf.bssid; + peer_addr = arsta->addr; + else if (ahvif->vdev_type == WMI_VDEV_TYPE_STA) + peer_addr = link_conf->bssid; else - peer_addr = vif->addr; + peer_addr = link_conf->addr; key->hw_key_idx = key->keyidx; /* the peer should not disappear in mid-way (unless FW goes awry) since - * we already hold conf_mutex. we just make sure its there now. + * we already hold the wiphy lock. We just make sure it's there now.
*/ spin_lock_bh(&ab->base_lock); peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr); @@ -3197,14 +4580,13 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, if (cmd == SET_KEY) { ath12k_warn(ab, "cannot install key for non-existent peer %pM\n", peer_addr); - ret = -EOPNOTSUPP; - goto exit; - } else { - /* if the peer doesn't exist there is no key to disable - * anymore - */ - goto exit; + return -EOPNOTSUPP; } + + /* if the peer doesn't exist there is no key to disable + * anymore + */ + return 0; } if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) @@ -3215,13 +4597,13 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, ret = ath12k_install_key(arvif, key, cmd, peer_addr, flags); if (ret) { ath12k_warn(ab, "ath12k_install_key failed (%d)\n", ret); - goto exit; + return ret; } ret = ath12k_dp_rx_peer_pn_replay_config(arvif, peer_addr, cmd, key); if (ret) { ath12k_warn(ab, "failed to offload PN replay detection %d\n", ret); - goto exit; + return ret; } spin_lock_bh(&ab->base_lock); @@ -3246,7 +4628,7 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, ath12k_warn(ab, "peer %pM disappeared!\n", peer_addr); if (sta) { - arsta = (struct ath12k_sta *)sta->drv_priv; + ahsta = ath12k_sta_to_ahsta(sta); switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: @@ -3255,21 +4637,152 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (cmd == SET_KEY) - arsta->pn_type = HAL_PN_TYPE_WPA; + ahsta->pn_type = HAL_PN_TYPE_WPA; else - arsta->pn_type = HAL_PN_TYPE_NONE; + ahsta->pn_type = HAL_PN_TYPE_NONE; break; default: - arsta->pn_type = HAL_PN_TYPE_NONE; + ahsta->pn_type = HAL_PN_TYPE_NONE; break; } } spin_unlock_bh(&ab->base_lock); -exit: - mutex_unlock(&ar->conf_mutex); - return ret; + return 0; +} + +static int ath12k_mac_update_key_cache(struct ath12k_vif_cache *cache, + enum set_key_cmd cmd, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct ath12k_key_conf *key_conf, *tmp; + + list_for_each_entry_safe(key_conf, tmp, &cache->key_conf.list, list) { + if (key_conf->key != key) + continue; + + /* If SET key entry is already present in cache, nothing to do, + * just return + */ + if (cmd == SET_KEY) + return 0; + + /* DEL key for an old SET key which driver hasn't flushed yet. 
+ */ + list_del(&key_conf->list); + kfree(key_conf); + } + + if (cmd == SET_KEY) { + key_conf = kzalloc(sizeof(*key_conf), GFP_KERNEL); + + if (!key_conf) + return -ENOMEM; + + key_conf->cmd = cmd; + key_conf->sta = sta; + key_conf->key = key; + list_add_tail(&key_conf->list, + &cache->key_conf.list); + } + + return 0; +} + +static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif; + struct ath12k_link_sta *arsta = NULL; + struct ath12k_vif_cache *cache; + struct ath12k_sta *ahsta; + unsigned long links; + u8 link_id; + int ret; + + lockdep_assert_wiphy(hw->wiphy); + + /* BIP needs to be done in software */ + if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || + key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || + key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 || + key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256) { + return 1; + } + + if (key->keyidx > WMI_MAX_KEY_INDEX) + return -ENOSPC; + + if (sta) { + ahsta = ath12k_sta_to_ahsta(sta); + + /* For an ML STA the pairwise key is the same for all associated + * link stations, hence set the key for all link STAs which are + * active. + */ + if (sta->mlo) { + links = ahsta->links_map; + for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { + arvif = wiphy_dereference(hw->wiphy, + ahvif->link[link_id]); + arsta = wiphy_dereference(hw->wiphy, + ahsta->link[link_id]); + + if (WARN_ON(!arvif || !arsta)) + /* arvif and arsta are expected to be valid when + * STA is present. + */ + continue; + + ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, + arsta, key); + if (ret) + break; + } + + return 0; + } + + arsta = &ahsta->deflink; + arvif = arsta->arvif; + if (WARN_ON(!arvif)) + return -EINVAL; + + ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, arsta, key); + if (ret) + return ret; + + return 0; + } + + if (key->link_id >= 0 && key->link_id < IEEE80211_MLD_MAX_NUM_LINKS) { + link_id = key->link_id; + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); + } else { + link_id = 0; + arvif = &ahvif->deflink; + } + + if (!arvif || !arvif->is_created) { + cache = ath12k_ahvif_get_link_cache(ahvif, link_id); + if (!cache) + return -ENOSPC; + + ret = ath12k_mac_update_key_cache(cache, cmd, sta, key); + if (ret) + return ret; + + return 0; + } + + ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, NULL, key); + if (ret) + return ret; + + return 0; } static int @@ -3287,8 +4800,8 @@ ath12k_mac_bitrate_mask_num_vht_rates(struct ath12k *ar, } static int -ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_vif *arvif, - struct ieee80211_sta *sta, +ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, const struct cfg80211_bitrate_mask *mask, enum nl80211_band band) { @@ -3297,7 +4810,7 @@ ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_vif *arvif, u32 rate_code; int ret, i; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); nss = 0; @@ -3310,61 +4823,76 @@ ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_vif *arvif, if (!nss) { ath12k_warn(ar->ab, "No single VHT Fixed rate found to set for %pM", - sta->addr); + arsta->addr); return -EINVAL; } ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Setting Fixed VHT Rate for peer %pM.
Device will not switch to any other selected rates", - sta->addr); + arsta->addr); rate_code = ATH12K_HW_RATE_CODE(vht_rate, nss - 1, WMI_RATE_PREAMBLE_VHT); - ret = ath12k_wmi_set_peer_param(ar, sta->addr, + ret = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id, WMI_PEER_PARAM_FIXED_RATE, rate_code); if (ret) ath12k_warn(ar->ab, "failed to update STA %pM Fixed Rate %d: %d\n", - sta->addr, rate_code, ret); + arsta->addr, rate_code, ret); return ret; } -static int ath12k_station_assoc(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - bool reassoc) +static int ath12k_mac_station_assoc(struct ath12k *ar, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, + bool reassoc) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); - struct ath12k_wmi_peer_assoc_arg peer_arg; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); + struct ieee80211_link_sta *link_sta; int ret; struct cfg80211_chan_def def; enum nl80211_band band; struct cfg80211_bitrate_mask *mask; u8 num_vht_rates; + u8 link_id = arvif->link_id; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); - if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) + if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))) return -EPERM; + if (WARN_ON(!rcu_access_pointer(sta->link[link_id]))) + return -EINVAL; + band = def.chan->band; mask = &arvif->bitrate_mask; - ath12k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc); + struct ath12k_wmi_peer_assoc_arg *peer_arg __free(kfree) = + kzalloc(sizeof(*peer_arg), GFP_KERNEL); + if (!peer_arg) + return -ENOMEM; + + ath12k_peer_assoc_prepare(ar, arvif, arsta, peer_arg, reassoc); - ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg); + if (peer_arg->peer_nss < 1) { + ath12k_warn(ar->ab, + "invalid peer NSS %d\n", peer_arg->peer_nss); + return -EINVAL; + } + ret = ath12k_wmi_send_peer_assoc_cmd(ar, peer_arg); if (ret) { ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n", - sta->addr, arvif->vdev_id, ret); + arsta->addr, arvif->vdev_id, ret); return ret; } if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) { ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n", - sta->addr, arvif->vdev_id); + arsta->addr, arvif->vdev_id); return -ETIMEDOUT; } @@ -3375,8 +4903,14 @@ static int ath12k_station_assoc(struct ath12k *ar, * fixed param. * Note that all other rates and NSS will be disabled for this peer. 
*/ - if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) { - ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, sta, mask, + link_sta = ath12k_mac_get_link_sta(arsta); + if (!link_sta) { + ath12k_warn(ar->ab, "unable to access link sta in station assoc\n"); + return -EINVAL; + } + + if (link_sta->vht_cap.vht_supported && num_vht_rates == 1) { + ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask, band); if (ret) return ret; @@ -3388,8 +4922,8 @@ static int ath12k_station_assoc(struct ath12k *ar, if (reassoc) return 0; - ret = ath12k_setup_peer_smps(ar, arvif, sta->addr, - &sta->deflink.ht_cap); + ret = ath12k_setup_peer_smps(ar, arvif, arsta->addr, + &link_sta->ht_cap, &link_sta->he_6ghz_capa); if (ret) { ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n", arvif->vdev_id, ret); @@ -3404,10 +4938,10 @@ static int ath12k_station_assoc(struct ath12k *ar, } if (sta->wme && sta->uapsd_queues) { - ret = ath12k_peer_assoc_qos_ap(ar, arvif, sta); + ret = ath12k_peer_assoc_qos_ap(ar, arvif, arsta); if (ret) { ath12k_warn(ar->ab, "failed to set qos params for STA %pM for vdev %i: %d\n", - sta->addr, arvif->vdev_id, ret); + arsta->addr, arvif->vdev_id, ret); return ret; } } @@ -3415,36 +4949,27 @@ static int ath12k_station_assoc(struct ath12k *ar, return 0; } -static int ath12k_station_disassoc(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) +static int ath12k_mac_station_disassoc(struct ath12k *ar, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta) { - struct ath12k_vif *arvif = (void *)vif->drv_priv; - int ret; + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); if (!sta->wme) { arvif->num_legacy_stations--; - ret = ath12k_recalc_rtscts_prot(arvif); - if (ret) - return ret; + return ath12k_recalc_rtscts_prot(arvif); } - ret = ath12k_clear_peer_keys(arvif, sta->addr); - if (ret) { - ath12k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n", - arvif->vdev_id, ret); - return ret; - } return 0; } -static void ath12k_sta_rc_update_wk(struct work_struct *wk) +static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk) { + struct ieee80211_link_sta *link_sta; struct ath12k *ar; - struct ath12k_vif *arvif; - struct ath12k_sta *arsta; + struct ath12k_link_vif *arvif; struct ieee80211_sta *sta; struct cfg80211_chan_def def; enum nl80211_band band; @@ -3453,15 +4978,19 @@ static void ath12k_sta_rc_update_wk(struct work_struct *wk) u32 changed, bw, nss, smps, bw_prev; int err, num_vht_rates; const struct cfg80211_bitrate_mask *mask; - struct ath12k_wmi_peer_assoc_arg peer_arg; enum wmi_phy_mode peer_phymode; + struct ath12k_link_sta *arsta; + struct ieee80211_vif *vif; - arsta = container_of(wk, struct ath12k_sta, update_wk); - sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv); + lockdep_assert_wiphy(wiphy); + + arsta = container_of(wk, struct ath12k_link_sta, update_wk); + sta = ath12k_ahsta_to_sta(arsta->ahsta); arvif = arsta->arvif; + vif = ath12k_ahvif_to_vif(arvif->ahvif); ar = arvif->ar; - if (WARN_ON(ath12k_mac_vif_chan(arvif->vif, &def))) + if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))) return; band = def.chan->band; @@ -3480,15 +5009,18 @@ static void ath12k_sta_rc_update_wk(struct work_struct *wk) spin_unlock_bh(&ar->data_lock); - mutex_lock(&ar->conf_mutex); - nss = max_t(u32, 1, nss); nss = min(nss, max(ath12k_mac_max_ht_nss(ht_mcs_mask), 
ath12k_mac_max_vht_nss(vht_mcs_mask))); + struct ath12k_wmi_peer_assoc_arg *peer_arg __free(kfree) = + kzalloc(sizeof(*peer_arg), GFP_KERNEL); + if (!peer_arg) + return; + if (changed & IEEE80211_RC_BW_CHANGED) { - ath12k_peer_assoc_h_phymode(ar, arvif->vif, sta, &peer_arg); - peer_phymode = peer_arg.peer_phymode; + ath12k_peer_assoc_h_phymode(ar, arvif, arsta, peer_arg); + peer_phymode = peer_arg->peer_phymode; if (bw > bw_prev) { /* Phymode shows maximum supported channel width, if we @@ -3497,65 +5029,65 @@ static void ath12k_sta_rc_update_wk(struct work_struct *wk) * WMI_PEER_CHWIDTH */ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac bandwidth upgrade for sta %pM new %d old %d\n", - sta->addr, bw, bw_prev); - err = ath12k_wmi_set_peer_param(ar, sta->addr, + arsta->addr, bw, bw_prev); + err = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id, WMI_PEER_PHYMODE, peer_phymode); if (err) { ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n", - sta->addr, peer_phymode, err); - goto err_rc_bw_changed; + arsta->addr, peer_phymode, err); + return; } - err = ath12k_wmi_set_peer_param(ar, sta->addr, + err = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id, WMI_PEER_CHWIDTH, bw); if (err) ath12k_warn(ar->ab, "failed to update STA %pM to peer bandwidth %d: %d\n", - sta->addr, bw, err); + arsta->addr, bw, err); } else { /* When we downgrade bandwidth this will conflict with phymode * and cause to trigger firmware crash. In this case we send * WMI_PEER_CHWIDTH followed by WMI_PEER_PHYMODE */ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac bandwidth downgrade for sta %pM new %d old %d\n", - sta->addr, bw, bw_prev); - err = ath12k_wmi_set_peer_param(ar, sta->addr, + arsta->addr, bw, bw_prev); + err = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id, WMI_PEER_CHWIDTH, bw); if (err) { ath12k_warn(ar->ab, "failed to update STA %pM peer to bandwidth %d: %d\n", - sta->addr, bw, err); - goto err_rc_bw_changed; + arsta->addr, bw, err); + return; } - err = ath12k_wmi_set_peer_param(ar, sta->addr, + err = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id, WMI_PEER_PHYMODE, peer_phymode); if (err) ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n", - sta->addr, peer_phymode, err); + arsta->addr, peer_phymode, err); } } if (changed & IEEE80211_RC_NSS_CHANGED) { ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM nss %d\n", - sta->addr, nss); + arsta->addr, nss); - err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, + err = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id, WMI_PEER_NSS, nss); if (err) ath12k_warn(ar->ab, "failed to update STA %pM nss %d: %d\n", - sta->addr, nss, err); + arsta->addr, nss, err); } if (changed & IEEE80211_RC_SMPS_CHANGED) { ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM smps %d\n", - sta->addr, smps); + arsta->addr, smps); - err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, + err = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id, WMI_PEER_MIMO_PS_STATE, smps); if (err) ath12k_warn(ar->ab, "failed to update STA %pM smps %d: %d\n", - sta->addr, smps, err); + arsta->addr, smps, err); } if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) { @@ -3574,39 +5106,74 @@ static void ath12k_sta_rc_update_wk(struct work_struct *wk) * TODO: Check RATEMASK_CMDID to support auto rates selection * across HT/VHT and for multiple VHT MCS support. 
*/ - if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) { - ath12k_mac_set_peer_vht_fixed_rate(arvif, sta, mask, + link_sta = ath12k_mac_get_link_sta(arsta); + if (!link_sta) { + ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n", + sta->addr, arsta->link_id); + return; + } + + if (link_sta->vht_cap.vht_supported && num_vht_rates == 1) { + ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask, band); } else { /* If the peer is non-VHT or no fixed VHT rate * is provided in the new bitrate mask we set the * other rates using peer_assoc command. */ - ath12k_peer_assoc_prepare(ar, arvif->vif, sta, - &peer_arg, true); + ath12k_peer_assoc_prepare(ar, arvif, arsta, + peer_arg, true); - err = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg); + err = ath12k_wmi_send_peer_assoc_cmd(ar, peer_arg); if (err) ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n", - sta->addr, arvif->vdev_id, err); + arsta->addr, arvif->vdev_id, err); if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n", - sta->addr, arvif->vdev_id); + arsta->addr, arvif->vdev_id); } } -err_rc_bw_changed: - mutex_unlock(&ar->conf_mutex); } -static int ath12k_mac_inc_num_stations(struct ath12k_vif *arvif, - struct ieee80211_sta *sta) +static void ath12k_mac_free_unassign_link_sta(struct ath12k_hw *ah, + struct ath12k_sta *ahsta, + u8 link_id) { + struct ath12k_link_sta *arsta; + + lockdep_assert_wiphy(ah->hw->wiphy); + + if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS)) + return; + + arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]); + if (WARN_ON(!arsta)) + return; + + ahsta->links_map &= ~BIT(link_id); + rcu_assign_pointer(ahsta->link[link_id], NULL); + synchronize_rcu(); + + if (arsta == &ahsta->deflink) { + arsta->link_id = ATH12K_INVALID_LINK_ID; + arsta->ahsta = NULL; + arsta->arvif = NULL; + return; + } + + kfree(arsta); +} + +static int ath12k_mac_inc_num_stations(struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta) +{ + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); struct ath12k *ar = arvif->ar; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); - if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) + if (arvif->ahvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) return 0; if (ar->num_stations >= ar->max_num_stations) @@ -3617,38 +5184,176 @@ static int ath12k_mac_inc_num_stations(struct ath12k_vif *arvif, return 0; } -static void ath12k_mac_dec_num_stations(struct ath12k_vif *arvif, - struct ieee80211_sta *sta) +static void ath12k_mac_dec_num_stations(struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta) { + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); struct ath12k *ar = arvif->ar; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); - if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) + if (arvif->ahvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) return; ar->num_stations--; } +static void ath12k_mac_station_post_remove(struct ath12k *ar, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta) +{ + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); + struct ath12k_peer *peer; + + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + ath12k_mac_dec_num_stations(arvif, arsta); + + spin_lock_bh(&ar->ab->base_lock); + + peer = 
ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr); + if (peer && peer->sta == sta) { + ath12k_warn(ar->ab, "Found peer entry %pM on vdev %i after it was supposedly removed\n", + vif->addr, arvif->vdev_id); + peer->sta = NULL; + list_del(&peer->list); + kfree(peer); + ar->num_peers--; + } + + spin_unlock_bh(&ar->ab->base_lock); + + kfree(arsta->rx_stats); + arsta->rx_stats = NULL; +} + +static int ath12k_mac_station_unauthorize(struct ath12k *ar, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta) +{ + struct ath12k_peer *peer; + int ret; + + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + spin_lock_bh(&ar->ab->base_lock); + + peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr); + if (peer) + peer->is_authorized = false; + + spin_unlock_bh(&ar->ab->base_lock); + + /* Driver must clear the keys during the state change from + * IEEE80211_STA_AUTHORIZED to IEEE80211_STA_ASSOC, since after + * returning from here, mac80211 is going to delete the keys + * in __sta_info_destroy_part2(). This will ensure that the driver does + * not retain stale key references after mac80211 deletes the keys. + */ + ret = ath12k_clear_peer_keys(arvif, arsta->addr); + if (ret) { + ath12k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + return 0; +} + +static int ath12k_mac_station_authorize(struct ath12k *ar, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta) +{ + struct ath12k_peer *peer; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + int ret; + + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + spin_lock_bh(&ar->ab->base_lock); + + peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr); + if (peer) + peer->is_authorized = true; + + spin_unlock_bh(&ar->ab->base_lock); + + if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) { + ret = ath12k_wmi_set_peer_param(ar, arsta->addr, + arvif->vdev_id, + WMI_PEER_AUTHORIZE, + 1); + if (ret) { + ath12k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n", + arsta->addr, arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +static int ath12k_mac_station_remove(struct ath12k *ar, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta) +{ + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); + struct ath12k_vif *ahvif = arvif->ahvif; + int ret = 0; + + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + wiphy_work_cancel(ar->ah->hw->wiphy, &arsta->update_wk); + + if (ahvif->vdev_type == WMI_VDEV_TYPE_STA) { + ath12k_bss_disassoc(ar, arvif); + ret = ath12k_mac_vdev_stop(arvif); + if (ret) + ath12k_warn(ar->ab, "failed to stop vdev %i: %d\n", + arvif->vdev_id, ret); + } + + if (sta->mlo) + return ret; + + ath12k_dp_peer_cleanup(ar, arvif->vdev_id, arsta->addr); + + ret = ath12k_peer_delete(ar, arvif->vdev_id, arsta->addr); + if (ret) + ath12k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n", + arsta->addr, arvif->vdev_id); + else + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n", + arsta->addr, arvif->vdev_id); + + ath12k_mac_station_post_remove(ar, arvif, arsta); + + if (sta->valid_links) + ath12k_mac_free_unassign_link_sta(ahvif->ah, + arsta->ahsta, arsta->link_id); + + return ret; +} + static int ath12k_mac_station_add(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta) { struct ath12k_base *ab = ar->ab; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); - struct ath12k_sta
*arsta = (struct ath12k_sta *)sta->drv_priv; - struct ath12k_wmi_peer_create_arg peer_param; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); + struct ath12k_wmi_peer_create_arg peer_param = {0}; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); - ret = ath12k_mac_inc_num_stations(arvif, sta); + ret = ath12k_mac_inc_num_stations(arvif, arsta); if (ret) { ath12k_warn(ab, "refusing to associate station: too many connected already (%d)\n", ar->max_num_stations); goto exit; } - arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL); if (!arsta->rx_stats) { ret = -ENOMEM; @@ -3656,41 +5361,42 @@ static int ath12k_mac_station_add(struct ath12k *ar, } peer_param.vdev_id = arvif->vdev_id; - peer_param.peer_addr = sta->addr; + peer_param.peer_addr = arsta->addr; peer_param.peer_type = WMI_PEER_TYPE_DEFAULT; + peer_param.ml_enabled = sta->mlo; ret = ath12k_peer_create(ar, arvif, sta, &peer_param); if (ret) { ath12k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n", - sta->addr, arvif->vdev_id); + arsta->addr, arvif->vdev_id); goto free_peer; } ath12k_dbg(ab, ATH12K_DBG_MAC, "Added peer: %pM for VDEV: %d\n", - sta->addr, arvif->vdev_id); + arsta->addr, arvif->vdev_id); if (ieee80211_vif_is_mesh(vif)) { - ret = ath12k_wmi_set_peer_param(ar, sta->addr, + ret = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id, WMI_PEER_USE_4ADDR, 1); if (ret) { ath12k_warn(ab, "failed to STA %pM 4addr capability: %d\n", - sta->addr, ret); + arsta->addr, ret); goto free_peer; } } - ret = ath12k_dp_peer_setup(ar, arvif->vdev_id, sta->addr); + ret = ath12k_dp_peer_setup(ar, arvif->vdev_id, arsta->addr); if (ret) { ath12k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n", - sta->addr, arvif->vdev_id, ret); + arsta->addr, arvif->vdev_id, ret); goto free_peer; } if (ab->hw_params->vdev_start_delay && !arvif->is_started && - arvif->vdev_type != WMI_VDEV_TYPE_AP) { - ret = ath12k_start_vdev_delay(ar->hw, vif); + arvif->ahvif->vdev_type != WMI_VDEV_TYPE_AP) { + ret = ath12k_start_vdev_delay(ar, arvif); if (ret) { ath12k_warn(ab, "failed to delay vdev start: %d\n", ret); goto free_peer; @@ -3700,9 +5406,11 @@ static int ath12k_mac_station_add(struct ath12k *ar, return 0; free_peer: - ath12k_peer_delete(ar, arvif->vdev_id, sta->addr); + ath12k_peer_delete(ar, arvif->vdev_id, arsta->addr); + kfree(arsta->rx_stats); + arsta->rx_stats = NULL; dec_num_station: - ath12k_mac_dec_num_stations(arvif, sta); + ath12k_mac_dec_num_stations(arvif, arsta); exit: return ret; } @@ -3725,6 +5433,9 @@ static u32 ath12k_mac_ieee80211_sta_bw_to_wmi(struct ath12k *ar, case IEEE80211_STA_RX_BW_160: bw = WMI_PEER_CHWIDTH_160MHZ; break; + case IEEE80211_STA_RX_BW_320: + bw = WMI_PEER_CHWIDTH_320MHZ; + break; default: ath12k_warn(ar->ab, "Invalid bandwidth %d in rc update for %pM\n", sta->deflink.bandwidth, sta->addr); @@ -3735,71 +5446,131 @@ static u32 ath12k_mac_ieee80211_sta_bw_to_wmi(struct ath12k *ar, return bw; } -static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - enum ieee80211_sta_state old_state, - enum ieee80211_sta_state new_state) +static int ath12k_mac_assign_link_sta(struct ath12k_hw *ah, + struct ath12k_sta *ahsta, + struct ath12k_link_sta *arsta, + struct ath12k_vif *ahvif, + u8 link_id) { - struct ath12k *ar = hw->priv; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); - struct ath12k_sta *arsta = (struct 
ath12k_sta *)sta->drv_priv; - struct ath12k_peer *peer; + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta); + struct ieee80211_link_sta *link_sta; + struct ath12k_link_vif *arvif; + + lockdep_assert_wiphy(ah->hw->wiphy); + + if (!arsta || link_id >= IEEE80211_MLD_MAX_NUM_LINKS) + return -EINVAL; + + arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]); + if (!arvif) + return -EINVAL; + + memset(arsta, 0, sizeof(*arsta)); + + link_sta = wiphy_dereference(ah->hw->wiphy, sta->link[link_id]); + if (!link_sta) + return -EINVAL; + + ether_addr_copy(arsta->addr, link_sta->addr); + + /* logical index of the link sta in order of creation */ + arsta->link_idx = ahsta->num_peer++; + + arsta->link_id = link_id; + ahsta->links_map |= BIT(arsta->link_id); + arsta->arvif = arvif; + arsta->ahsta = ahsta; + ahsta->ahvif = ahvif; + + wiphy_work_init(&arsta->update_wk, ath12k_sta_rc_update_wk); + + rcu_assign_pointer(ahsta->link[link_id], arsta); + + return 0; +} + +static void ath12k_mac_ml_station_remove(struct ath12k_vif *ahvif, + struct ath12k_sta *ahsta) +{ + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta); + struct ath12k_hw *ah = ahvif->ah; + struct ath12k_link_vif *arvif; + struct ath12k_link_sta *arsta; + unsigned long links; + struct ath12k *ar; + u8 link_id; + + lockdep_assert_wiphy(ah->hw->wiphy); + + ath12k_peer_mlo_link_peers_delete(ahvif, ahsta); + + /* validate link station removal and clear arsta links */ + links = ahsta->links_map; + for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { + arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]); + arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]); + if (!arvif || !arsta) + continue; + + ar = arvif->ar; + + ath12k_mac_station_post_remove(ar, arvif, arsta); + + ath12k_mac_free_unassign_link_sta(ah, ahsta, link_id); + } + + ath12k_peer_ml_delete(ah, sta); +} + +static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state) +{ + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); + struct ath12k *ar = arvif->ar; int ret = 0; - /* cancel must be done outside the mutex to avoid deadlock */ - if ((old_state == IEEE80211_STA_NONE && - new_state == IEEE80211_STA_NOTEXIST)) - cancel_work_sync(&arsta->update_wk); + lockdep_assert_wiphy(hw->wiphy); - mutex_lock(&ar->conf_mutex); + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac handle link %u sta %pM state %d -> %d\n", + arsta->link_id, arsta->addr, old_state, new_state); + /* IEEE80211_STA_NONE -> IEEE80211_STA_NOTEXIST: Remove the station + * from driver + */ + if ((old_state == IEEE80211_STA_NONE && + new_state == IEEE80211_STA_NOTEXIST)) { + ret = ath12k_mac_station_remove(ar, arvif, arsta); + if (ret) { + ath12k_warn(ar->ab, "Failed to remove station: %pM for VDEV: %d\n", + arsta->addr, arvif->vdev_id); + goto exit; + } + } + + /* IEEE80211_STA_NOTEXIST -> IEEE80211_STA_NONE: Add new station to driver */ if (old_state == IEEE80211_STA_NOTEXIST && new_state == IEEE80211_STA_NONE) { - memset(arsta, 0, sizeof(*arsta)); - arsta->arvif = arvif; - INIT_WORK(&arsta->update_wk, ath12k_sta_rc_update_wk); - - ret = ath12k_mac_station_add(ar, vif, sta); + ret = ath12k_mac_station_add(ar, arvif, arsta); if (ret) ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n", - sta->addr, arvif->vdev_id); - } else if ((old_state == 
IEEE80211_STA_NONE && - new_state == IEEE80211_STA_NOTEXIST)) { - ath12k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr); - - ret = ath12k_peer_delete(ar, arvif->vdev_id, sta->addr); - if (ret) - ath12k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n", - sta->addr, arvif->vdev_id); - else - ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n", - sta->addr, arvif->vdev_id); - - ath12k_mac_dec_num_stations(arvif, sta); - spin_lock_bh(&ar->ab->base_lock); - peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr); - if (peer && peer->sta == sta) { - ath12k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n", - vif->addr, arvif->vdev_id); - peer->sta = NULL; - list_del(&peer->list); - kfree(peer); - ar->num_peers--; - } - spin_unlock_bh(&ar->ab->base_lock); + arsta->addr, arvif->vdev_id); - kfree(arsta->rx_stats); - arsta->rx_stats = NULL; + /* IEEE80211_STA_AUTH -> IEEE80211_STA_ASSOC: Send station assoc command for + * peer associated to AP/Mesh/ADHOC vif type. + */ } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC && (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_MESH_POINT || vif->type == NL80211_IFTYPE_ADHOC)) { - ret = ath12k_station_assoc(ar, vif, sta, false); + ret = ath12k_mac_station_assoc(ar, arvif, arsta, false); if (ret) ath12k_warn(ar->ab, "Failed to associate station: %pM\n", - sta->addr); + arsta->addr); spin_lock_bh(&ar->data_lock); @@ -3807,46 +5578,154 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw, arsta->bw_prev = sta->deflink.bandwidth; spin_unlock_bh(&ar->data_lock); + + /* IEEE80211_STA_ASSOC -> IEEE80211_STA_AUTHORIZED: set peer status as + * authorized + */ } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) { - spin_lock_bh(&ar->ab->base_lock); - - peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr); - if (peer) - peer->is_authorized = true; - - spin_unlock_bh(&ar->ab->base_lock); + ret = ath12k_mac_station_authorize(ar, arvif, arsta); + if (ret) + ath12k_warn(ar->ab, "Failed to authorize station: %pM\n", + arsta->addr); - if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) { - ret = ath12k_wmi_set_peer_param(ar, sta->addr, - arvif->vdev_id, - WMI_PEER_AUTHORIZE, - 1); - if (ret) - ath12k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n", - sta->addr, arvif->vdev_id, ret); - } + /* IEEE80211_STA_AUTHORIZED -> IEEE80211_STA_ASSOC: station may be in removal, + * deauthorize it. + */ } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { - spin_lock_bh(&ar->ab->base_lock); - - peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr); - if (peer) - peer->is_authorized = false; + ath12k_mac_station_unauthorize(ar, arvif, arsta); - spin_unlock_bh(&ar->ab->base_lock); + /* IEEE80211_STA_ASSOC -> IEEE80211_STA_AUTH: disassoc peer connected to + * AP/mesh/ADHOC vif type. 
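
Taken together, these branches implement mac80211's station state ladder (NOTEXIST -> NONE -> AUTH -> ASSOC -> AUTHORIZED): mac80211 always moves a station one hop at a time, and the driver only acts on the hops it cares about. A minimal standalone sketch of the same dispatch pattern, with hypothetical stub handlers standing in for the ath12k_mac_station_*() helpers:

#include <stdio.h>

/* Userspace sketch of mac80211's station state ladder; the real
 * enum ieee80211_sta_state lives in include/net/mac80211.h.
 */
enum sta_state { STA_NOTEXIST, STA_NONE, STA_AUTH, STA_ASSOC, STA_AUTHORIZED };

/* Hypothetical stand-ins for ath12k_mac_station_add()/remove()/assoc()/
 * disassoc()/authorize()/unauthorize(); each hop maps to one action.
 */
static int handle_transition(enum sta_state old_st, enum sta_state new_st)
{
	if (old_st == STA_NOTEXIST && new_st == STA_NONE)
		printf("add station\n");
	else if (old_st == STA_NONE && new_st == STA_NOTEXIST)
		printf("remove station\n");
	else if (old_st == STA_AUTH && new_st == STA_ASSOC)
		printf("send peer assoc\n");
	else if (old_st == STA_ASSOC && new_st == STA_AUTHORIZED)
		printf("authorize peer\n");
	else if (old_st == STA_AUTHORIZED && new_st == STA_ASSOC)
		printf("unauthorize peer, clear keys\n");
	else if (old_st == STA_ASSOC && new_st == STA_AUTH)
		printf("send disassoc\n");
	return 0; /* other hops need no driver action */
}

int main(void)
{
	enum sta_state s;

	/* walk a station all the way up the ladder, one hop at a time */
	for (s = STA_NOTEXIST; s < STA_AUTHORIZED; s++)
		handle_transition(s, s + 1);
	return 0;
}

For MLO the same per-hop handler simply runs once per link sta, which is exactly what the for_each_set_bit() loop over ahsta->links_map below does.
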
+ */ } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTH && (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_MESH_POINT || vif->type == NL80211_IFTYPE_ADHOC)) { - ret = ath12k_station_disassoc(ar, vif, sta); + ret = ath12k_mac_station_disassoc(ar, arvif, arsta); if (ret) ath12k_warn(ar->ab, "Failed to disassociate station: %pM\n", - sta->addr); + arsta->addr); } - mutex_unlock(&ar->conf_mutex); +exit: + return ret; +} + +static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state) +{ + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k_link_vif *arvif; + struct ath12k_link_sta *arsta; + unsigned long valid_links; + u8 link_id = 0; + int ret; + + lockdep_assert_wiphy(hw->wiphy); + + if (ieee80211_vif_is_mld(vif) && sta->valid_links) { + WARN_ON(!sta->mlo && hweight16(sta->valid_links) != 1); + link_id = ffs(sta->valid_links) - 1; + } + + /* IEEE80211_STA_NOTEXIST -> IEEE80211_STA_NONE: + * New station add received. If this is a ML station then + * ahsta->links_map will be zero and sta->valid_links will be 1. + * Assign default link to the first link sta. + */ + if (old_state == IEEE80211_STA_NOTEXIST && + new_state == IEEE80211_STA_NONE) { + memset(ahsta, 0, sizeof(*ahsta)); + + arsta = &ahsta->deflink; + + /* ML sta */ + if (sta->mlo && !ahsta->links_map && + (hweight16(sta->valid_links) == 1)) { + ret = ath12k_peer_ml_create(ah, sta); + if (ret) { + ath12k_hw_warn(ah, "unable to create ML peer for sta %pM", + sta->addr); + goto exit; + } + } + + ret = ath12k_mac_assign_link_sta(ah, ahsta, arsta, ahvif, + link_id); + if (ret) { + ath12k_hw_warn(ah, "unable to assign link %d for sta %pM", + link_id, sta->addr); + goto exit; + } + + /* above arsta will get memset, hence do this after assigning the + * link sta + */ + if (sta->mlo) { + arsta->is_assoc_link = true; + ahsta->assoc_link_id = link_id; + } + } + + /* In the ML station scenario, activate all partner links once the + * client is transitioning to the associated state. + * + * FIXME: Ideally, this activation should occur when the client + * transitions to the authorized state. However, there are some + * issues with handling this in the firmware. Until the firmware + * can manage it properly, activate the links when the client is + * about to move to the associated state. + */ + if (ieee80211_vif_is_mld(vif) && vif->type == NL80211_IFTYPE_STATION && + old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC) + ieee80211_set_active_links(vif, ieee80211_vif_usable_links(vif)); + + /* Handle all the other state transitions in a generic way */ + valid_links = ahsta->links_map; + for_each_set_bit(link_id, &valid_links, IEEE80211_MLD_MAX_NUM_LINKS) { + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); + arsta = wiphy_dereference(hw->wiphy, ahsta->link[link_id]); + /* some assumptions went wrong!
*/ + if (WARN_ON(!arvif || !arsta)) + continue; + + /* vdev might have been deleted */ + if (WARN_ON(!arvif->ar)) + continue; + + ret = ath12k_mac_handle_link_sta_state(hw, arvif, arsta, + old_state, new_state); + if (ret) { + ath12k_hw_warn(ah, "unable to move link sta %d of sta %pM from state %d to %d", + link_id, arsta->addr, old_state, new_state); + goto exit; + } + } + + /* IEEE80211_STA_NONE -> IEEE80211_STA_NOTEXIST: + * Remove the station from driver (handle ML sta here since that + * needs special handling. Normal sta will be handled in the generic + * handler below). + */ + if (old_state == IEEE80211_STA_NONE && + new_state == IEEE80211_STA_NOTEXIST && sta->mlo) + ath12k_mac_ml_station_remove(ahvif, ahsta); + + ret = 0; + +exit: + /* update the state if everything went well */ + if (!ret) + ahsta->state = new_state; + return ret; } @@ -3854,25 +5733,41 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { - struct ath12k *ar = hw->priv; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); + struct ath12k *ar; + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif; + struct ath12k_link_sta *arsta; + u8 link_id; int ret; s16 txpwr; + lockdep_assert_wiphy(hw->wiphy); + + /* TODO: use link id from mac80211 once that's implemented */ + link_id = 0; + + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); + arsta = wiphy_dereference(hw->wiphy, ahsta->link[link_id]); + if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) { txpwr = 0; } else { txpwr = sta->deflink.txpwr.power; - if (!txpwr) - return -EINVAL; + if (!txpwr) { + ret = -EINVAL; + goto out; + } } - if (txpwr > ATH12K_TX_POWER_MAX_VAL || txpwr < ATH12K_TX_POWER_MIN_VAL) - return -EINVAL; + if (txpwr > ATH12K_TX_POWER_MAX_VAL || txpwr < ATH12K_TX_POWER_MIN_VAL) { + ret = -EINVAL; + goto out; + } - mutex_lock(&ar->conf_mutex); + ar = arvif->ar; - ret = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, + ret = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id, WMI_PEER_USE_FIXED_PWR, txpwr); if (ret) { ath12k_warn(ar->ab, "failed to set tx power for station ret: %d\n", @@ -3881,37 +5776,72 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw, out: - mutex_unlock(&ar->conf_mutex); return ret; } -static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - u32 changed) +static void ath12k_mac_op_link_sta_rc_update(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta, + u32 changed) { - struct ath12k *ar = hw->priv; - struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ieee80211_sta *sta = link_sta->sta; + struct ath12k *ar; + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k_link_sta *arsta; + struct ath12k_link_vif *arvif; struct ath12k_peer *peer; u32 bw, smps; + rcu_read_lock(); + arvif = rcu_dereference(ahvif->link[link_sta->link_id]); + if (!arvif) { + ath12k_hw_warn(ah, "mac sta rc update failed to fetch link vif on link id %u for peer %pM\n", + link_sta->link_id, sta->addr); + rcu_read_unlock(); + return; + } + + ar = arvif->ar; + + arsta = rcu_dereference(ahsta->link[link_sta->link_id]); + if (!arsta) { + rcu_read_unlock(); + ath12k_warn(ar->ab, "mac
sta rc update failed to fetch link sta on link id %u for peer %pM\n", + link_sta->link_id, sta->addr); + return; + } spin_lock_bh(&ar->ab->base_lock); - peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr); + peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr); if (!peer) { spin_unlock_bh(&ar->ab->base_lock); + rcu_read_unlock(); ath12k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n", - sta->addr, arvif->vdev_id); + arsta->addr, arvif->vdev_id); return; } spin_unlock_bh(&ar->ab->base_lock); + if (arsta->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) { + rcu_read_unlock(); + return; + } + + link_sta = rcu_dereference(sta->link[arsta->link_id]); + if (!link_sta) { + rcu_read_unlock(); + ath12k_warn(ar->ab, "unable to access link sta in rc update for sta %pM link %u\n", + sta->addr, arsta->link_id); + return; + } + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n", - sta->addr, changed, sta->deflink.bandwidth, sta->deflink.rx_nss, - sta->deflink.smps_mode); + arsta->addr, changed, link_sta->bandwidth, link_sta->rx_nss, + link_sta->smps_mode); spin_lock_bh(&ar->data_lock); @@ -3922,12 +5852,12 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw, } if (changed & IEEE80211_RC_NSS_CHANGED) - arsta->nss = sta->deflink.rx_nss; + arsta->nss = link_sta->rx_nss; if (changed & IEEE80211_RC_SMPS_CHANGED) { smps = WMI_PEER_SMPS_PS_NONE; - switch (sta->deflink.smps_mode) { + switch (link_sta->smps_mode) { case IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_OFF: smps = WMI_PEER_SMPS_PS_NONE; @@ -3939,8 +5869,8 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw, smps = WMI_PEER_SMPS_DYNAMIC; break; default: - ath12k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n", - sta->deflink.smps_mode, sta->addr); + ath12k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM link %u\n", + link_sta->smps_mode, arsta->addr, link_sta->link_id); smps = WMI_PEER_SMPS_PS_NONE; break; } @@ -3952,17 +5882,124 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw, spin_unlock_bh(&ar->data_lock); - ieee80211_queue_work(hw, &arsta->update_wk); + wiphy_work_queue(hw->wiphy, &arsta->update_wk); + + rcu_read_unlock(); } -static int ath12k_conf_tx_uapsd(struct ath12k *ar, struct ieee80211_vif *vif, +static struct ath12k_link_sta *ath12k_mac_alloc_assign_link_sta(struct ath12k_hw *ah, + struct ath12k_sta *ahsta, + struct ath12k_vif *ahvif, + u8 link_id) +{ + struct ath12k_link_sta *arsta; + int ret; + + lockdep_assert_wiphy(ah->hw->wiphy); + + if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS) + return NULL; + + arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]); + if (arsta) + return NULL; + + arsta = kmalloc(sizeof(*arsta), GFP_KERNEL); + if (!arsta) + return NULL; + + ret = ath12k_mac_assign_link_sta(ah, ahsta, arsta, ahvif, link_id); + if (ret) { + kfree(arsta); + return NULL; + } + + return arsta; +} + +static int ath12k_mac_op_change_sta_links(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u16 old_links, u16 new_links) +{ + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); + struct ath12k_hw *ah = hw->priv; + struct ath12k_link_vif *arvif; + struct ath12k_link_sta *arsta; + unsigned long valid_links; + struct ath12k *ar; + u8 link_id; + int ret; + + lockdep_assert_wiphy(hw->wiphy); + + if (!sta->valid_links) + return -EINVAL; + + /* Firmware does not support removal of a single link sta.
All sta + * would be removed during ML STA delete in sta_state(), hence link + * sta removal is not handled here. + */ + if (new_links < old_links) + return 0; + + if (ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) { + ath12k_hw_warn(ah, "unable to add link for ml sta %pM", sta->addr); + return -EINVAL; + } + + /* this op is expected only after initial sta insertion with default link */ + if (WARN_ON(ahsta->links_map == 0)) + return -EINVAL; + + valid_links = new_links; + for_each_set_bit(link_id, &valid_links, IEEE80211_MLD_MAX_NUM_LINKS) { + if (ahsta->links_map & BIT(link_id)) + continue; + + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); + arsta = ath12k_mac_alloc_assign_link_sta(ah, ahsta, ahvif, link_id); + + if (!arvif || !arsta) { + ath12k_hw_warn(ah, "Failed to alloc/assign link sta"); + continue; + } + + ar = arvif->ar; + if (!ar) + continue; + + ret = ath12k_mac_station_add(ar, arvif, arsta); + if (ret) { + ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n", + arsta->addr, arvif->vdev_id); + ath12k_mac_free_unassign_link_sta(ah, ahsta, link_id); + return ret; + } + } + + return 0; +} + +static bool ath12k_mac_op_can_activate_links(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u16 active_links) +{ + /* TODO: Handle recovery case */ + + return true; +} + +static int ath12k_conf_tx_uapsd(struct ath12k_link_vif *arvif, u16 ac, bool enable) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k *ar = arvif->ar; + struct ath12k_vif *ahvif = arvif->ahvif; u32 value; int ret; - if (arvif->vdev_type != WMI_VDEV_TYPE_STA) + if (ahvif->vdev_type != WMI_VDEV_TYPE_STA) return 0; switch (ac) { @@ -3985,19 +6022,19 @@ static int ath12k_conf_tx_uapsd(struct ath12k *ar, struct ieee80211_vif *vif, } if (enable) - arvif->u.sta.uapsd |= value; + ahvif->u.sta.uapsd |= value; else - arvif->u.sta.uapsd &= ~value; + ahvif->u.sta.uapsd &= ~value; ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, WMI_STA_PS_PARAM_UAPSD, - arvif->u.sta.uapsd); + ahvif->u.sta.uapsd); if (ret) { ath12k_warn(ar->ab, "could not set uapsd params %d\n", ret); goto exit; } - if (arvif->u.sta.uapsd) + if (ahvif->u.sta.uapsd) value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD; else value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; @@ -4012,17 +6049,15 @@ exit: return ret; } -static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - unsigned int link_id, u16 ac, - const struct ieee80211_tx_queue_params *params) +static int ath12k_mac_conf_tx(struct ath12k_link_vif *arvif, u16 ac, + const struct ieee80211_tx_queue_params *params) { - struct ath12k *ar = hw->priv; - struct ath12k_vif *arvif = (void *)vif->drv_priv; struct wmi_wmm_params_arg *p = NULL; + struct ath12k *ar = arvif->ar; + struct ath12k_base *ab = ar->ab; int ret; - mutex_lock(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); switch (ac) { case IEEE80211_AC_VO: @@ -4052,17 +6087,50 @@ static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw, ret = ath12k_wmi_send_wmm_update_cmd(ar, arvif->vdev_id, &arvif->wmm_params); if (ret) { - ath12k_warn(ar->ab, "failed to set wmm params: %d\n", ret); + ath12k_warn(ab, "pdev idx %d failed to set wmm params: %d\n", + ar->pdev_idx, ret); goto exit; } - ret = ath12k_conf_tx_uapsd(ar, vif, ac, params->uapsd); - + ret = ath12k_conf_tx_uapsd(arvif, ac, params->uapsd); if (ret) - ath12k_warn(ar->ab, "failed to set sta uapsd: %d\n", ret); + ath12k_warn(ab, "pdev idx %d failed to set sta uapsd: %d\n", + ar->pdev_idx, ret); exit: - 
mutex_unlock(&ar->conf_mutex); + return ret; +} + +static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + unsigned int link_id, u16 ac, + const struct ieee80211_tx_queue_params *params) +{ + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif; + struct ath12k_vif_cache *cache; + int ret; + + lockdep_assert_wiphy(hw->wiphy); + + if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS) + return -EINVAL; + + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); + if (!arvif || !arvif->is_created) { + cache = ath12k_ahvif_get_link_cache(ahvif, link_id); + if (!cache) + return -ENOSPC; + + cache->tx_conf.changed = true; + cache->tx_conf.ac = ac; + cache->tx_conf.tx_queue_params = *params; + + return 0; + } + + ret = ath12k_mac_conf_tx(arvif, ac, params); + return ret; } @@ -4132,10 +6200,11 @@ ath12k_create_ht_cap(struct ath12k *ar, u32 ar_ht_cap, u32 rate_cap_rx_chainmask return ht_cap; } -static int ath12k_mac_set_txbf_conf(struct ath12k_vif *arvif) +static int ath12k_mac_set_txbf_conf(struct ath12k_link_vif *arvif) { u32 value = 0; struct ath12k *ar = arvif->ar; + struct ath12k_vif *ahvif = arvif->ahvif; int nsts; int sound_dim; u32 vht_cap = ar->pdev->cap.vht_cap; @@ -4163,7 +6232,7 @@ static int ath12k_mac_set_txbf_conf(struct ath12k_vif *arvif) value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) && - arvif->vdev_type == WMI_VDEV_TYPE_AP) + ahvif->vdev_type == WMI_VDEV_TYPE_AP) value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER; } @@ -4171,7 +6240,7 @@ static int ath12k_mac_set_txbf_conf(struct ath12k_vif *arvif) value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) && - arvif->vdev_type == WMI_VDEV_TYPE_STA) + ahvif->vdev_type == WMI_VDEV_TYPE_STA) value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE; } @@ -4553,7 +6622,50 @@ static void ath12k_mac_copy_eht_ppe_thresh(struct ath12k_wmi_ppe_threshold_arg * } } -static void ath12k_mac_copy_eht_cap(struct ath12k_band_cap *band_cap, +static void +ath12k_mac_filter_eht_cap_mesh(struct ieee80211_eht_cap_elem_fixed + *eht_cap_elem) +{ + u8 m; + + m = IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS; + eht_cap_elem->mac_cap_info[0] &= ~m; + + m = IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO; + eht_cap_elem->phy_cap_info[0] &= ~m; + + m = IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK | + IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK; + eht_cap_elem->phy_cap_info[3] &= ~m; + + m = IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO | + IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP | + IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP | + IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI; + eht_cap_elem->phy_cap_info[4] &= ~m; + + m = IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK | + IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP | + IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP | + IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK; + eht_cap_elem->phy_cap_info[5] &= ~m; + + m = IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK; + eht_cap_elem->phy_cap_info[6] &= ~m; + + m = IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ; + eht_cap_elem->phy_cap_info[7] &= ~m; +} + +static void ath12k_mac_copy_eht_cap(struct ath12k *ar, + 
struct ath12k_band_cap *band_cap, struct ieee80211_he_cap_elem *he_cap_elem, int iftype, struct ieee80211_sta_eht_cap *eht_cap) @@ -4561,6 +6673,10 @@ static void ath12k_mac_copy_eht_cap(struct ath12k_band_cap *band_cap, struct ieee80211_eht_cap_elem_fixed *eht_cap_elem = &eht_cap->eht_cap_elem; memset(eht_cap, 0, sizeof(struct ieee80211_sta_eht_cap)); + + if (!(test_bit(WMI_TLV_SERVICE_11BE, ar->ab->wmi_ab.svc_map))) + return; + eht_cap->has_eht = true; memcpy(eht_cap_elem->mac_cap_info, band_cap->eht_cap_mac_info, sizeof(eht_cap_elem->mac_cap_info)); @@ -4586,6 +6702,9 @@ static void ath12k_mac_copy_eht_cap(struct ath12k_band_cap *band_cap, IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ | IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ); break; + case NL80211_IFTYPE_MESH_POINT: + ath12k_mac_filter_eht_cap_mesh(eht_cap_elem); + break; default: break; } @@ -4626,7 +6745,7 @@ static int ath12k_mac_copy_sband_iftype_data(struct ath12k *ar, data[idx].he_6ghz_capa.capa = ath12k_mac_setup_he_6ghz_cap(cap, band_cap); } - ath12k_mac_copy_eht_cap(band_cap, &he_cap->he_cap_elem, i, + ath12k_mac_copy_eht_cap(ar, band_cap, &he_cap->he_cap_elem, i, &data[idx].eht_cap); idx++; } @@ -4647,8 +6766,8 @@ static void ath12k_mac_setup_sband_iftype_data(struct ath12k *ar, ar->mac.iftype[band], band); sband = &ar->mac.sbands[band]; - sband->iftype_data = ar->mac.iftype[band]; - sband->n_iftype_data = count; + _ieee80211_set_sband_iftype_data(sband, ar->mac.iftype[band], + count); } if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) { @@ -4657,8 +6776,8 @@ static void ath12k_mac_setup_sband_iftype_data(struct ath12k *ar, ar->mac.iftype[band], band); sband = &ar->mac.sbands[band]; - sband->iftype_data = ar->mac.iftype[band]; - sband->n_iftype_data = count; + _ieee80211_set_sband_iftype_data(sband, ar->mac.iftype[band], + count); } if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP && @@ -4668,16 +6787,17 @@ static void ath12k_mac_setup_sband_iftype_data(struct ath12k *ar, ar->mac.iftype[band], band); sband = &ar->mac.sbands[band]; - sband->iftype_data = ar->mac.iftype[band]; - sband->n_iftype_data = count; + _ieee80211_set_sband_iftype_data(sband, ar->mac.iftype[band], + count); } } static int __ath12k_set_antenna(struct ath12k *ar, u32 tx_ant, u32 rx_ant) { + struct ath12k_hw *ah = ath12k_ar_to_ah(ar); int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); if (ath12k_check_chain_mask(ar, tx_ant, true)) return -EINVAL; @@ -4685,11 +6805,18 @@ static int __ath12k_set_antenna(struct ath12k *ar, u32 tx_ant, u32 rx_ant) if (ath12k_check_chain_mask(ar, rx_ant, false)) return -EINVAL; + /* Since we advertised the max cap of all radios combined during wiphy + * registration, ensure we don't set the antenna config higher than the + * limits + */ + tx_ant = min_t(u32, tx_ant, ar->pdev->cap.tx_chain_mask); + rx_ant = min_t(u32, rx_ant, ar->pdev->cap.rx_chain_mask); + ar->cfg_tx_chainmask = tx_ant; ar->cfg_rx_chainmask = rx_ant; - if (ar->state != ATH12K_STATE_ON && - ar->state != ATH12K_STATE_RESTARTED) + if (ah->state != ATH12K_HW_STATE_ON && + ah->state != ATH12K_HW_STATE_RESTARTED) return 0; ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_TX_CHAIN_MASK, @@ -4723,7 +6850,9 @@ static void ath12k_mgmt_over_wmi_tx_drop(struct ath12k *ar, struct sk_buff *skb) { int num_mgmt; - ieee80211_free_txskb(ar->hw, skb); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + ieee80211_free_txskb(ath12k_ar_to_hw(ar), skb); num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx); @@ -4774,7 
+6903,7 @@ static int ath12k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx) return 0; } -static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_vif *arvif, +static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_link_vif *arvif, struct sk_buff *skb) { struct ath12k_base *ab = ar->ab; @@ -4784,6 +6913,8 @@ static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_vif *arvif, int buf_id; int ret; + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + ATH12K_SKB_CB(skb)->ar = ar; spin_lock_bh(&ar->txmgmt_idr_lock); buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0, @@ -4838,14 +6969,18 @@ static void ath12k_mgmt_over_wmi_tx_purge(struct ath12k *ar) ath12k_mgmt_over_wmi_tx_drop(ar, skb); } -static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work) +static void ath12k_mgmt_over_wmi_tx_work(struct wiphy *wiphy, struct wiphy_work *work) { struct ath12k *ar = container_of(work, struct ath12k, wmi_mgmt_tx_work); + struct ath12k_hw *ah = ar->ah; struct ath12k_skb_cb *skb_cb; - struct ath12k_vif *arvif; + struct ath12k_vif *ahvif; + struct ath12k_link_vif *arvif; struct sk_buff *skb; int ret; + lockdep_assert_wiphy(wiphy); + while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) { skb_cb = ATH12K_SKB_CB(skb); if (!skb_cb->vif) { @@ -4854,9 +6989,17 @@ static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work) continue; } - arvif = ath12k_vif_to_arvif(skb_cb->vif); - if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) && - arvif->is_started) { + ahvif = ath12k_vif_to_ahvif(skb_cb->vif); + if (!(ahvif->links_map & BIT(skb_cb->link_id))) { + ath12k_warn(ar->ab, + "invalid linkid %u in mgmt over wmi tx with linkmap 0x%x\n", + skb_cb->link_id, ahvif->links_map); + ath12k_mgmt_over_wmi_tx_drop(ar, skb); + continue; + } + + arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[skb_cb->link_id]); + if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) { ret = ath12k_mac_mgmt_tx_wmi(ar, arvif, skb); if (ret) { ath12k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n", @@ -4865,8 +7008,9 @@ static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work) } } else { ath12k_warn(ar->ab, - "dropping mgmt frame for vdev %d, is_started %d\n", + "dropping mgmt frame for vdev %d link %u is_started %d\n", arvif->vdev_id, + skb_cb->link_id, arvif->is_started); ath12k_mgmt_over_wmi_tx_drop(ar, skb); } @@ -4900,26 +7044,150 @@ static int ath12k_mac_mgmt_tx(struct ath12k *ar, struct sk_buff *skb, skb_queue_tail(q, skb); atomic_inc(&ar->num_pending_mgmt_tx); - ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work); + wiphy_work_queue(ath12k_ar_to_hw(ar)->wiphy, &ar->wmi_mgmt_tx_work); return 0; } +static void ath12k_mac_add_p2p_noa_ie(struct ath12k *ar, + struct ieee80211_vif *vif, + struct sk_buff *skb, + bool is_prb_rsp) +{ + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + + if (likely(!is_prb_rsp)) + return; + + spin_lock_bh(&ar->data_lock); + + if (ahvif->u.ap.noa_data && + !pskb_expand_head(skb, 0, ahvif->u.ap.noa_len, + GFP_ATOMIC)) + skb_put_data(skb, ahvif->u.ap.noa_data, + ahvif->u.ap.noa_len); + + spin_unlock_bh(&ar->data_lock); +} + +/* Note: called under rcu_read_lock() */ +static u8 ath12k_mac_get_tx_link(struct ieee80211_sta *sta, struct ieee80211_vif *vif, + u8 link, struct sk_buff *skb, u32 info_flags) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ieee80211_link_sta *link_sta; + struct ieee80211_bss_conf *bss_conf; + struct ath12k_sta *ahsta; + + /* 
Use the link id passed or the default vif link */ + if (!sta) { + if (link != IEEE80211_LINK_UNSPECIFIED) + return link; + + return ahvif->deflink.link_id; + } + + ahsta = ath12k_sta_to_ahsta(sta); + + /* Below translation ensures we pass proper A2 & A3 for non ML clients. + * For now it also assumes only MLO AP support in this path + */ + if (!sta->mlo) { + link = ahsta->deflink.link_id; + + if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) + return link; + + bss_conf = rcu_dereference(vif->link_conf[link]); + if (bss_conf) { + ether_addr_copy(hdr->addr2, bss_conf->addr); + if (!ieee80211_has_tods(hdr->frame_control) && + !ieee80211_has_fromds(hdr->frame_control)) + ether_addr_copy(hdr->addr3, bss_conf->addr); + } + + return link; + } + + /* enqueue eth encap & data frames on primary link, FW does link + * selection and address translation. + */ + if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP || + ieee80211_is_data(hdr->frame_control)) + return ahsta->assoc_link_id; + + /* 802.11 frame cases */ + if (link == IEEE80211_LINK_UNSPECIFIED) + link = ahsta->deflink.link_id; + + if (!ieee80211_is_mgmt(hdr->frame_control)) + return link; + + /* Perform address conversion for ML STA Tx */ + bss_conf = rcu_dereference(vif->link_conf[link]); + link_sta = rcu_dereference(sta->link[link]); + + if (bss_conf && link_sta) { + ether_addr_copy(hdr->addr1, link_sta->addr); + ether_addr_copy(hdr->addr2, bss_conf->addr); + + if (vif->type == NL80211_IFTYPE_STATION && bss_conf->bssid) + ether_addr_copy(hdr->addr3, bss_conf->bssid); + else if (vif->type == NL80211_IFTYPE_AP) + ether_addr_copy(hdr->addr3, bss_conf->addr); + + return link; + } + + if (bss_conf) { + /* In certain cases where a ML sta associated and added subset of + * links on which the ML AP is active, but now sends some frame + * (ex. Probe request) on a different link which is active in our + * MLD but was not added during previous association, we can + * still honor the Tx to that ML STA via the requested link. + * The control would reach here in such case only when that link + * address is same as the MLD address or in worst case clients + * used MLD address at TA wrongly which would have helped + * identify the ML sta object and pass it here. + * If the link address of that STA is different from MLD address, + * then the sta object would be NULL and control won't reach + * here but return at the start of the function itself with !sta + * check. Also this would not need any translation at hdr->addr1 + * from MLD to link address since the RA is the MLD address + * (same as that link address ideally) already.
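
The selection rules above boil down to a small pure function. A standalone sketch with simplified, hypothetical types (the real helper additionally rewrites addr1/addr2/addr3 with ether_addr_copy() using the per-link BSS conf):

#include <stdbool.h>
#include <stdio.h>

#define LINK_UNSPECIFIED 0xff /* stands in for IEEE80211_LINK_UNSPECIFIED */

/* Hypothetical miniature of the station state the driver consults */
struct sim_sta {
	bool mlo;          /* multi-link station? */
	int deflink_id;    /* default link */
	int assoc_link_id; /* primary (association) link */
};

/* Mirrors the rules above: no sta -> requested link or vif default;
 * non-ML sta -> its only link; ML data / ethernet-encap frames -> the
 * assoc link (firmware does the final link selection); ML mgmt frames
 * -> the requested link, falling back to the default link.
 */
static int pick_tx_link(const struct sim_sta *sta, int requested,
			bool hw_encap, bool is_data)
{
	if (!sta)
		return requested != LINK_UNSPECIFIED ? requested : 0;

	if (!sta->mlo)
		return sta->deflink_id;

	if (hw_encap || is_data)
		return sta->assoc_link_id;

	return requested != LINK_UNSPECIFIED ? requested : sta->deflink_id;
}

int main(void)
{
	struct sim_sta ml = { .mlo = true, .deflink_id = 0, .assoc_link_id = 1 };

	printf("data frame -> link %d\n",
	       pick_tx_link(&ml, LINK_UNSPECIFIED, false, true));   /* 1 */
	printf("mgmt frame on link 2 -> link %d\n",
	       pick_tx_link(&ml, 2, false, false));                 /* 2 */
	return 0;
}
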
+ */ + ether_addr_copy(hdr->addr2, bss_conf->addr); + + if (vif->type == NL80211_IFTYPE_STATION && bss_conf->bssid) + ether_addr_copy(hdr->addr3, bss_conf->bssid); + else if (vif->type == NL80211_IFTYPE_AP) + ether_addr_copy(hdr->addr3, bss_conf->addr); + } + + return link; +} + +/* Note: called under rcu_read_lock() */ static void ath12k_mac_op_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) { struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb); - struct ath12k *ar = hw->priv; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_vif *vif = info->control.vif; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif = &ahvif->deflink; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_key_conf *key = info->control.hw_key; + struct ieee80211_sta *sta = control->sta; u32 info_flags = info->flags; + struct ath12k *ar; bool is_prb_rsp; + u8 link_id; int ret; + link_id = u32_get_bits(info->control.flags, IEEE80211_TX_CTRL_MLO_LINK); memset(skb_cb, 0, sizeof(*skb_cb)); skb_cb->vif = vif; @@ -4928,101 +7196,91 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw, skb_cb->flags |= ATH12K_SKB_CIPHER_SET; } + /* handle only for MLO case, use deflink for non MLO case */ + if (ieee80211_vif_is_mld(vif)) { + link_id = ath12k_mac_get_tx_link(sta, vif, link_id, skb, info_flags); + if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS) { + ieee80211_free_txskb(hw, skb); + return; + } + } else { + link_id = 0; + } + + arvif = rcu_dereference(ahvif->link[link_id]); + if (!arvif || !arvif->ar) { + ath12k_warn(ahvif->ah, "failed to find arvif link id %u for frame transmission", + link_id); + ieee80211_free_txskb(hw, skb); + return; + } + + ar = arvif->ar; + skb_cb->link_id = link_id; + is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control); + if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) { skb_cb->flags |= ATH12K_SKB_HW_80211_ENCAP; } else if (ieee80211_is_mgmt(hdr->frame_control)) { - is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control); ret = ath12k_mac_mgmt_tx(ar, skb, is_prb_rsp); if (ret) { ath12k_warn(ar->ab, "failed to queue management frame %d\n", ret); - ieee80211_free_txskb(ar->hw, skb); + ieee80211_free_txskb(hw, skb); } return; } + /* This is case only for P2P_GO */ + if (vif->type == NL80211_IFTYPE_AP && vif->p2p) + ath12k_mac_add_p2p_noa_ie(ar, vif, skb, is_prb_rsp); + ret = ath12k_dp_tx(ar, arvif, skb); if (ret) { ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret); - ieee80211_free_txskb(ar->hw, skb); + ieee80211_free_txskb(hw, skb); } } void ath12k_mac_drain_tx(struct ath12k *ar) { + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + /* make sure rcu-protected mac80211 tx path itself is drained */ synchronize_net(); - cancel_work_sync(&ar->wmi_mgmt_tx_work); + wiphy_work_cancel(ath12k_ar_to_hw(ar)->wiphy, &ar->wmi_mgmt_tx_work); ath12k_mgmt_over_wmi_tx_purge(ar); } static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable) { - return -ENOTSUPP; + return -EOPNOTSUPP; /* TODO: Need to support new monitor mode */ } -static void ath12k_mac_wait_reconfigure(struct ath12k_base *ab) +static int ath12k_mac_start(struct ath12k *ar) { - int recovery_start_count; - - if (!ab->is_reset) - return; - - recovery_start_count = atomic_inc_return(&ab->recovery_start_count); - - ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery start count %d\n", recovery_start_count); - - if (recovery_start_count == ab->num_radios) { - 
complete(&ab->recovery_start); - ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery started success\n"); - } - - ath12k_dbg(ab, ATH12K_DBG_MAC, "waiting reconfigure...\n"); - - wait_for_completion_timeout(&ab->reconfigure_complete, - ATH12K_RECONFIGURE_TIMEOUT_HZ); -} - -static int ath12k_mac_op_start(struct ieee80211_hw *hw) -{ - struct ath12k *ar = hw->priv; + struct ath12k_hw *ah = ar->ah; struct ath12k_base *ab = ar->ab; struct ath12k_pdev *pdev = ar->pdev; int ret; - ath12k_mac_drain_tx(ar); - mutex_lock(&ar->conf_mutex); - - switch (ar->state) { - case ATH12K_STATE_OFF: - ar->state = ATH12K_STATE_ON; - break; - case ATH12K_STATE_RESTARTING: - ar->state = ATH12K_STATE_RESTARTED; - ath12k_mac_wait_reconfigure(ab); - break; - case ATH12K_STATE_RESTARTED: - case ATH12K_STATE_WEDGED: - case ATH12K_STATE_ON: - WARN_ON(1); - ret = -EINVAL; - goto err; - } + lockdep_assert_held(&ah->hw_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1, pdev->pdev_id); if (ret) { - ath12k_err(ar->ab, "failed to enable PMF QOS: (%d\n", ret); + ath12k_err(ab, "failed to enable PMF QOS: %d\n", ret); goto err; } ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 1, pdev->pdev_id); if (ret) { - ath12k_err(ar->ab, "failed to enable dynamic bw: %d\n", ret); + ath12k_err(ab, "failed to enable dynamic bw: %d\n", ret); goto err; } @@ -5052,7 +7310,7 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw) 1, pdev->pdev_id); if (ret) { - ath12k_err(ar->ab, "failed to enable MESH MCAST ENABLE: (%d\n", ret); + ath12k_err(ab, "failed to enable MESH MCAST ENABLE: %d\n", ret); goto err; } @@ -5071,14 +7329,14 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw) * such as rssi, rx_duration. */ ret = ath12k_mac_config_mon_status_default(ar, true); - if (ret && (ret != -ENOTSUPP)) { + if (ret && (ret != -EOPNOTSUPP)) { ath12k_err(ab, "failed to configure monitor status ring with default rx_filter: (%d)\n", ret); goto err; } - if (ret == -ENOTSUPP) - ath12k_dbg(ar->ab, ATH12K_DBG_MAC, + if (ret == -EOPNOTSUPP) + ath12k_dbg(ab, ATH12K_DBG_MAC, "monitor status config is not yet supported"); /* Configure the hash seed for hash based reo dest ring selection */ @@ -5094,40 +7352,153 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw) } } - mutex_unlock(&ar->conf_mutex); - rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], &ab->pdevs[ar->pdev_idx]); return 0; - err: - ar->state = ATH12K_STATE_OFF; - mutex_unlock(&ar->conf_mutex); return ret; } -static void ath12k_mac_op_stop(struct ieee80211_hw *hw) +static void ath12k_drain_tx(struct ath12k_hw *ah) +{ + struct ath12k *ar; + int i; + + lockdep_assert_wiphy(ah->hw->wiphy); + + for_each_ar(ah, ar, i) + ath12k_mac_drain_tx(ar); +} + +static int ath12k_mac_op_start(struct ieee80211_hw *hw) +{ + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k *ar; + int ret, i; + + lockdep_assert_wiphy(hw->wiphy); + + ath12k_drain_tx(ah); + + guard(mutex)(&ah->hw_mutex); + + switch (ah->state) { + case ATH12K_HW_STATE_OFF: + ah->state = ATH12K_HW_STATE_ON; + break; + case ATH12K_HW_STATE_RESTARTING: + ah->state = ATH12K_HW_STATE_RESTARTED; + break; + case ATH12K_HW_STATE_RESTARTED: + case ATH12K_HW_STATE_WEDGED: + case ATH12K_HW_STATE_ON: + ah->state = ATH12K_HW_STATE_OFF; + + WARN_ON(1); + return -EINVAL; + } + + for_each_ar(ah, ar, i) { + ret = ath12k_mac_start(ar); + if (ret) { + ah->state = ATH12K_HW_STATE_OFF; + + ath12k_err(ar->ab, "failed to start mac operations in pdev idx %d ret %d\n",
ar->pdev_idx, ret); + goto fail_start; + } + } + + return 0; + +fail_start: + for (; i > 0; i--) { + ar = ath12k_ah_to_ar(ah, i - 1); + ath12k_mac_stop(ar); + } + + return ret; +} + +int ath12k_mac_rfkill_config(struct ath12k *ar) +{ + struct ath12k_base *ab = ar->ab; + u32 param; + int ret; + + if (ab->hw_params->rfkill_pin == 0) + return -EOPNOTSUPP; + + ath12k_dbg(ab, ATH12K_DBG_MAC, + "mac rfkill_pin %d rfkill_cfg %d rfkill_on_level %d", + ab->hw_params->rfkill_pin, ab->hw_params->rfkill_cfg, + ab->hw_params->rfkill_on_level); + + param = u32_encode_bits(ab->hw_params->rfkill_on_level, + WMI_RFKILL_CFG_RADIO_LEVEL) | + u32_encode_bits(ab->hw_params->rfkill_pin, + WMI_RFKILL_CFG_GPIO_PIN_NUM) | + u32_encode_bits(ab->hw_params->rfkill_cfg, + WMI_RFKILL_CFG_PIN_AS_GPIO); + + ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_HW_RFKILL_CONFIG, + param, ar->pdev->pdev_id); + if (ret) { + ath12k_warn(ab, + "failed to set rfkill config 0x%x: %d\n", + param, ret); + return ret; + } + + return 0; +} + +int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable) { - struct ath12k *ar = hw->priv; + enum wmi_rfkill_enable_radio param; + int ret; + + if (enable) + param = WMI_RFKILL_ENABLE_RADIO_ON; + else + param = WMI_RFKILL_ENABLE_RADIO_OFF; + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac %d rfkill enable %d", + ar->pdev_idx, param); + + ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RFKILL_ENABLE, + param, ar->pdev->pdev_id); + if (ret) { + ath12k_warn(ar->ab, "failed to set rfkill enable param %d: %d\n", + param, ret); + return ret; + } + + return 0; +} + +static void ath12k_mac_stop(struct ath12k *ar) +{ + struct ath12k_hw *ah = ar->ah; struct htt_ppdu_stats_info *ppdu_stats, *tmp; int ret; - ath12k_mac_drain_tx(ar); + lockdep_assert_held(&ah->hw_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); - mutex_lock(&ar->conf_mutex); ret = ath12k_mac_config_mon_status_default(ar, false); - if (ret && (ret != -ENOTSUPP)) + if (ret && (ret != -EOPNOTSUPP)) ath12k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n", ret); - clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags); - ar->state = ATH12K_STATE_OFF; - mutex_unlock(&ar->conf_mutex); + clear_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags); cancel_delayed_work_sync(&ar->scan.timeout); + wiphy_work_cancel(ath12k_ar_to_hw(ar)->wiphy, &ar->scan.vdev_clean_wk); cancel_work_sync(&ar->regd_update_work); + cancel_work_sync(&ar->ab->rfkill_work); spin_lock_bh(&ar->data_lock); list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) { @@ -5143,8 +7514,28 @@ static void ath12k_mac_op_stop(struct ieee80211_hw *hw) atomic_set(&ar->num_pending_mgmt_tx, 0); } +static void ath12k_mac_op_stop(struct ieee80211_hw *hw, bool suspend) +{ + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k *ar; + int i; + + lockdep_assert_wiphy(hw->wiphy); + + ath12k_drain_tx(ah); + + mutex_lock(&ah->hw_mutex); + + ah->state = ATH12K_HW_STATE_OFF; + + for_each_ar(ah, ar, i) + ath12k_mac_stop(ar); + + mutex_unlock(&ah->hw_mutex); +} + static u8 -ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif) +ath12k_mac_get_vdev_stats_id(struct ath12k_link_vif *arvif) { struct ath12k_base *ab = arvif->ar->ab; u8 vdev_stats_id = 0; @@ -5152,7 +7543,7 @@ ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif) do { if (ab->free_vdev_stats_id_map & (1LL << vdev_stats_id)) { vdev_stats_id++; - if (vdev_stats_id <= ATH12K_INVAL_VDEV_STATS_ID) { + if (vdev_stats_id >= ATH12K_MAX_VDEV_STATS_ID) { vdev_stats_id = ATH12K_INVAL_VDEV_STATS_ID; break; } @@ 
-5166,17 +7557,73 @@ ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif) return vdev_stats_id; } -static void ath12k_mac_setup_vdev_create_arg(struct ath12k_vif *arvif, - struct ath12k_wmi_vdev_create_arg *arg) +static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_link_vif *arvif, + u32 *flags, u32 *tx_vdev_id) +{ + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_vif *tx_vif = ahvif->vif->mbssid_tx_vif; + struct ieee80211_bss_conf *link_conf; + struct ath12k *ar = arvif->ar; + struct ath12k_link_vif *tx_arvif; + struct ath12k_vif *tx_ahvif; + + if (!tx_vif) + return 0; + + link_conf = ath12k_mac_get_link_bss_conf(arvif); + if (!link_conf) { + ath12k_warn(ar->ab, "unable to access bss link conf in set mbssid params for vif %pM link %u\n", + ahvif->vif->addr, arvif->link_id); + return -ENOLINK; + } + + tx_ahvif = ath12k_vif_to_ahvif(tx_vif); + tx_arvif = &tx_ahvif->deflink; + + if (link_conf->nontransmitted) { + if (ar->ah->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy) + return -EINVAL; + + *flags = WMI_VDEV_MBSSID_FLAGS_NON_TRANSMIT_AP; + *tx_vdev_id = tx_arvif->vdev_id; + } else if (tx_arvif == arvif) { + *flags = WMI_VDEV_MBSSID_FLAGS_TRANSMIT_AP; + } else { + return -EINVAL; + } + + if (link_conf->ema_ap) + *flags |= WMI_VDEV_MBSSID_FLAGS_EMA_MODE; + + return 0; +} + +static int ath12k_mac_setup_vdev_create_arg(struct ath12k_link_vif *arvif, + struct ath12k_wmi_vdev_create_arg *arg) { struct ath12k *ar = arvif->ar; struct ath12k_pdev *pdev = ar->pdev; + struct ath12k_vif *ahvif = arvif->ahvif; + int ret; + + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); arg->if_id = arvif->vdev_id; - arg->type = arvif->vdev_type; - arg->subtype = arvif->vdev_subtype; + arg->type = ahvif->vdev_type; + arg->subtype = ahvif->vdev_subtype; arg->pdev_id = pdev->pdev_id; + arg->mbssid_flags = WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP; + arg->mbssid_tx_vdev_id = 0; + if (!test_bit(WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT, + ar->ab->wmi_ab.svc_map)) { + ret = ath12k_mac_setup_vdev_params_mbssid(arvif, + &arg->mbssid_flags, + &arg->mbssid_tx_vdev_id); + if (ret) + return ret; + } + if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) { arg->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains; arg->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains; @@ -5192,6 +7639,18 @@ static void ath12k_mac_setup_vdev_create_arg(struct ath12k_vif *arvif, } arg->if_stats_id = ath12k_mac_get_vdev_stats_id(arvif); + + if (ath12k_mac_is_ml_arvif(arvif)) { + if (hweight16(ahvif->vif->valid_links) > ATH12K_WMI_MLO_MAX_LINKS) { + ath12k_warn(ar->ab, "too many MLO links during setting up vdev: %d", + ahvif->vif->valid_links); + return -EINVAL; + } + + ether_addr_copy(arg->mld_addr, ahvif->vif->addr); + } + + return 0; } static u32 @@ -5229,14 +7688,15 @@ ath12k_mac_prepare_he_mode(struct ath12k_pdev *pdev, u32 viftype) } static int ath12k_set_he_mu_sounding_mode(struct ath12k *ar, - struct ath12k_vif *arvif) + struct ath12k_link_vif *arvif) { u32 param_id, param_value; struct ath12k_base *ab = ar->ab; + struct ath12k_vif *ahvif = arvif->ahvif; int ret; param_id = WMI_VDEV_PARAM_SET_HEMU_MODE; - param_value = ath12k_mac_prepare_he_mode(ar->pdev, arvif->vif->type); + param_value = ath12k_mac_prepare_he_mode(ar->pdev, ahvif->vif->type); ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, param_value); if (ret) { @@ -5259,12 +7719,12 @@ static int ath12k_set_he_mu_sounding_mode(struct ath12k *ar, return ret; } -static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw, - struct 
ieee80211_vif *vif) +static void ath12k_mac_update_vif_offload(struct ath12k_link_vif *arvif) { - struct ath12k *ar = hw->priv; + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); + struct ath12k *ar = arvif->ar; struct ath12k_base *ab = ar->ab; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); u32 param_id, param_value; int ret; @@ -5275,14 +7735,14 @@ static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw, IEEE80211_OFFLOAD_DECAP_ENABLED); if (vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED) - arvif->tx_encap_type = ATH12K_HW_TXRX_ETHERNET; + ahvif->tx_encap_type = ATH12K_HW_TXRX_ETHERNET; else if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) - arvif->tx_encap_type = ATH12K_HW_TXRX_RAW; + ahvif->tx_encap_type = ATH12K_HW_TXRX_RAW; else - arvif->tx_encap_type = ATH12K_HW_TXRX_NATIVE_WIFI; + ahvif->tx_encap_type = ATH12K_HW_TXRX_NATIVE_WIFI; ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, - param_id, arvif->tx_encap_type); + param_id, ahvif->tx_encap_type); if (ret) { ath12k_warn(ab, "failed to set vdev %d tx encap mode: %d\n", arvif->vdev_id, ret); @@ -5306,100 +7766,128 @@ static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw, } } -static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { - struct ath12k *ar = hw->priv; - struct ath12k_base *ab = ar->ab; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); - struct ath12k_wmi_vdev_create_arg vdev_arg = {0}; - struct ath12k_wmi_peer_create_arg peer_param; - u32 param_id, param_value; - u16 nss; - int i; - int ret; - int bit; + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif; + unsigned long links; + int link_id; - vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; + lockdep_assert_wiphy(hw->wiphy); - mutex_lock(&ar->conf_mutex); + if (vif->valid_links) { + links = vif->valid_links; + for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); + if (!(arvif && arvif->ar)) + continue; - if (vif->type == NL80211_IFTYPE_AP && - ar->num_peers > (ar->max_num_peers - 1)) { - ath12k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n"); - ret = -ENOBUFS; - goto err; - } + ath12k_mac_update_vif_offload(arvif); + } - if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) { - ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n", - TARGET_NUM_VDEVS); - ret = -EBUSY; - goto err; + return; } - memset(arvif, 0, sizeof(*arvif)); + ath12k_mac_update_vif_offload(&ahvif->deflink); +} - arvif->ar = ar; - arvif->vif = vif; +int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif) +{ + struct ath12k_hw *ah = ar->ah; + struct ath12k_base *ab = ar->ab; + struct ieee80211_hw *hw = ah->hw; + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); + struct ath12k_wmi_vdev_create_arg vdev_arg = {0}; + struct ath12k_wmi_peer_create_arg peer_param = {0}; + struct ieee80211_bss_conf *link_conf; + u32 param_id, param_value; + u16 nss; + int i; + int ret, vdev_id; + u8 link_id; - INIT_LIST_HEAD(&arvif->list); + lockdep_assert_wiphy(hw->wiphy); - /* Should we initialize any worker to handle connection loss indication - * from firmware in sta mode? 
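
The vdev id handed to firmware comes from ab->free_vdev_map, a 64-bit bitmap: the lowest set bit is claimed at create time (the __ffs64() call below) and set again on delete. A userspace sketch of that bookkeeping, substituting the GCC-style builtin __builtin_ctzll() for the kernel's __ffs64():

#include <stdint.h>
#include <stdio.h>

static uint64_t free_vdev_map = ~0ULL; /* all 64 vdev ids start free */

/* claim the lowest free vdev id, as vdev_id = __ffs64(ab->free_vdev_map) */
static int vdev_id_alloc(void)
{
	int id;

	if (!free_vdev_map)
		return -1; /* reached the vdev limit */

	id = __builtin_ctzll(free_vdev_map);
	free_vdev_map &= ~(1ULL << id); /* allocated_vdev_map gets the bit */
	return id;
}

/* return the id on vdev delete: free_vdev_map |= 1LL << vdev_id */
static void vdev_id_free(int id)
{
	free_vdev_map |= 1ULL << id;
}

int main(void)
{
	int a = vdev_id_alloc(); /* 0 */
	int b = vdev_id_alloc(); /* 1 */

	printf("allocated vdev ids %d and %d\n", a, b);
	vdev_id_free(a);
	printf("lowest free id after freeing %d: %d\n", a, vdev_id_alloc());
	return 0;
}
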
+ /* If no link is active and scan vdev is requested + * use a default link conf for scan address purpose. */ + if (arvif->link_id == ATH12K_DEFAULT_SCAN_LINK && vif->valid_links) + link_id = ffs(vif->valid_links) - 1; + else + link_id = arvif->link_id; - for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { - arvif->bitrate_mask.control[i].legacy = 0xffffffff; - memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, - sizeof(arvif->bitrate_mask.control[i].ht_mcs)); - memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, - sizeof(arvif->bitrate_mask.control[i].vht_mcs)); + link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[link_id]); + if (!link_conf) { + ath12k_warn(ar->ab, "unable to access bss link conf in vdev create for vif %pM link %u\n", + vif->addr, arvif->link_id); + return -ENOLINK; } - bit = __ffs64(ab->free_vdev_map); + memcpy(arvif->bssid, link_conf->addr, ETH_ALEN); - arvif->vdev_id = bit; - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; + arvif->ar = ar; + vdev_id = __ffs64(ab->free_vdev_map); + arvif->vdev_id = vdev_id; + ahvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; switch (vif->type) { case NL80211_IFTYPE_UNSPECIFIED: case NL80211_IFTYPE_STATION: - arvif->vdev_type = WMI_VDEV_TYPE_STA; + ahvif->vdev_type = WMI_VDEV_TYPE_STA; + + if (vif->p2p) + ahvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT; + break; case NL80211_IFTYPE_MESH_POINT: - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S; + ahvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S; fallthrough; case NL80211_IFTYPE_AP: - arvif->vdev_type = WMI_VDEV_TYPE_AP; + ahvif->vdev_type = WMI_VDEV_TYPE_AP; + + if (vif->p2p) + ahvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO; + break; case NL80211_IFTYPE_MONITOR: - arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; - ar->monitor_vdev_id = bit; + ahvif->vdev_type = WMI_VDEV_TYPE_MONITOR; + ar->monitor_vdev_id = vdev_id; + break; + case NL80211_IFTYPE_P2P_DEVICE: + ahvif->vdev_type = WMI_VDEV_TYPE_STA; + ahvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE; break; default: WARN_ON(1); break; } - ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac add interface id %d type %d subtype %d map %llx\n", - arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev create id %d type %d subtype %d map %llx\n", + arvif->vdev_id, ahvif->vdev_type, ahvif->vdev_subtype, ab->free_vdev_map); vif->cab_queue = arvif->vdev_id % (ATH12K_HW_MAX_QUEUES - 1); for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++) vif->hw_queue[i] = i % (ATH12K_HW_MAX_QUEUES - 1); - ath12k_mac_setup_vdev_create_arg(arvif, &vdev_arg); + ret = ath12k_mac_setup_vdev_create_arg(arvif, &vdev_arg); + if (ret) { + ath12k_warn(ab, "failed to create vdev parameters %d: %d\n", + arvif->vdev_id, ret); + goto err; + } - ret = ath12k_wmi_vdev_create(ar, vif->addr, &vdev_arg); + ret = ath12k_wmi_vdev_create(ar, arvif->bssid, &vdev_arg); if (ret) { ath12k_warn(ab, "failed to create WMI vdev %d: %d\n", arvif->vdev_id, ret); - goto err; + return ret; } ar->num_created_vdevs++; + arvif->is_created = true; ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM created, vdev_id %d\n", vif->addr, arvif->vdev_id); ar->allocated_vdev_map |= 1LL << arvif->vdev_id; @@ -5409,7 +7897,7 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw, list_add(&arvif->list, &ar->arvifs); spin_unlock_bh(&ar->data_lock); - ath12k_mac_op_update_vif_offload(hw, vif); + ath12k_mac_update_vif_offload(arvif); nss = hweight32(ar->cfg_tx_chainmask) ? 
: 1; ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, @@ -5420,10 +7908,10 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw, goto err_vdev_del; } - switch (arvif->vdev_type) { + switch (ahvif->vdev_type) { case WMI_VDEV_TYPE_AP: peer_param.vdev_id = arvif->vdev_id; - peer_param.peer_addr = vif->addr; + peer_param.peer_addr = arvif->bssid; peer_param.peer_type = WMI_PEER_TYPE_DEFAULT; ret = ath12k_peer_create(ar, arvif, NULL, &peer_param); if (ret) { @@ -5481,13 +7969,13 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw, break; } - arvif->txpower = vif->bss_conf.txpower; + arvif->txpower = link_conf->txpower; ret = ath12k_mac_txpower_recalc(ar); if (ret) goto err_peer_del; param_id = WMI_VDEV_PARAM_RTS_THRESHOLD; - param_value = ar->hw->wiphy->rts_threshold; + param_value = hw->wiphy->rts_threshold; ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, param_value); if (ret) { @@ -5496,30 +7984,27 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw, } ath12k_dp_vdev_tx_attach(ar, arvif); - if (vif->type != NL80211_IFTYPE_MONITOR && ar->monitor_conf_enabled) ath12k_mac_monitor_vdev_create(ar); - mutex_unlock(&ar->conf_mutex); - return ret; err_peer_del: - if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { + if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) { reinit_completion(&ar->peer_delete_done); - ret = ath12k_wmi_send_peer_delete_cmd(ar, vif->addr, + ret = ath12k_wmi_send_peer_delete_cmd(ar, arvif->bssid, arvif->vdev_id); if (ret) { ath12k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n", - arvif->vdev_id, vif->addr); + arvif->vdev_id, arvif->bssid); goto err; } ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id, - vif->addr); + arvif->bssid); if (ret) - goto err; + goto err_vdev_del; ar->num_peers--; } @@ -5527,6 +8012,8 @@ err_peer_del: err_vdev_del: ath12k_wmi_vdev_delete(ar, arvif->vdev_id); ar->num_created_vdevs--; + arvif->is_created = false; + arvif->ar = NULL; ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id); ab->free_vdev_map |= 1LL << arvif->vdev_id; ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id); @@ -5535,11 +8022,217 @@ err_vdev_del: spin_unlock_bh(&ar->data_lock); err: - mutex_unlock(&ar->conf_mutex); - + arvif->ar = NULL; return ret; } +static void ath12k_mac_vif_flush_key_cache(struct ath12k_link_vif *arvif) +{ + struct ath12k_key_conf *key_conf, *tmp; + struct ath12k_vif *ahvif = arvif->ahvif; + struct ath12k_hw *ah = ahvif->ah; + struct ath12k_sta *ahsta; + struct ath12k_link_sta *arsta; + struct ath12k_vif_cache *cache = ahvif->cache[arvif->link_id]; + int ret; + + lockdep_assert_wiphy(ah->hw->wiphy); + + list_for_each_entry_safe(key_conf, tmp, &cache->key_conf.list, list) { + arsta = NULL; + if (key_conf->sta) { + ahsta = ath12k_sta_to_ahsta(key_conf->sta); + arsta = wiphy_dereference(ah->hw->wiphy, + ahsta->link[arvif->link_id]); + if (!arsta) + goto free_cache; + } + + ret = ath12k_mac_set_key(arvif->ar, key_conf->cmd, + arvif, arsta, + key_conf->key); + if (ret) + ath12k_warn(arvif->ar->ab, "unable to apply set key param to vdev %d ret %d\n", + arvif->vdev_id, ret); +free_cache: + list_del(&key_conf->list); + kfree(key_conf); + } +} + +static void ath12k_mac_vif_cache_flush(struct ath12k *ar, struct ath12k_link_vif *arvif) +{ + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); + struct ath12k_vif_cache *cache = ahvif->cache[arvif->link_id]; + struct ath12k_base *ab = ar->ab; + + int ret; + + 
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + if (!cache) + return; + + if (cache->tx_conf.changed) { + ret = ath12k_mac_conf_tx(arvif, cache->tx_conf.ac, + &cache->tx_conf.tx_queue_params); + if (ret) + ath12k_warn(ab, + "unable to apply tx config parameters to vdev %d\n", + ret); + } + + if (cache->bss_conf_changed) { + ath12k_mac_bss_info_changed(ar, arvif, &vif->bss_conf, + cache->bss_conf_changed); + } + + if (!list_empty(&cache->key_conf.list)) + ath12k_mac_vif_flush_key_cache(arvif); + + ath12k_ahvif_put_link_cache(ahvif, arvif->link_id); +} + +static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw, + struct ath12k_link_vif *arvif, + struct ieee80211_chanctx_conf *ctx) +{ + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); + struct ath12k_link_vif *scan_arvif; + struct ath12k_hw *ah = hw->priv; + struct ath12k *ar; + struct ath12k_base *ab; + u8 link_id = arvif->link_id; + int ret; + + lockdep_assert_wiphy(hw->wiphy); + + if (ah->num_radio == 1) + ar = ah->radio; + else if (ctx) + ar = ath12k_get_ar_by_ctx(hw, ctx); + else + return NULL; + + if (!ar) + return NULL; + + /* cleanup the scan vdev if we are done scan on that ar + * and now we want to create for actual usage. + */ + if (ieee80211_vif_is_mld(vif)) { + scan_arvif = wiphy_dereference(hw->wiphy, + ahvif->link[ATH12K_DEFAULT_SCAN_LINK]); + if (scan_arvif && scan_arvif->ar == ar) { + ar->scan.arvif = NULL; + ath12k_mac_remove_link_interface(hw, scan_arvif); + ath12k_mac_unassign_link_vif(scan_arvif); + } + } + + if (arvif->ar) { + /* This is not expected really */ + if (WARN_ON(!arvif->is_created)) { + arvif->ar = NULL; + return NULL; + } + + if (ah->num_radio == 1) + return arvif->ar; + + /* This can happen as scan vdev gets created during multiple scans + * across different radios before a vdev is brought up in + * a certain radio. + */ + if (ar != arvif->ar) { + if (WARN_ON(arvif->is_started)) + return NULL; + + ath12k_mac_remove_link_interface(hw, arvif); + ath12k_mac_unassign_link_vif(arvif); + } + } + + ab = ar->ab; + + /* Assign arvif again here since previous radio switch block + * would've unassigned and cleared it. + */ + arvif = ath12k_mac_assign_link_vif(ah, vif, link_id); + if (vif->type == NL80211_IFTYPE_AP && + ar->num_peers > (ar->max_num_peers - 1)) { + ath12k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n"); + goto unlock; + } + + if (arvif->is_created) + goto flush; + + if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) { + ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n", + TARGET_NUM_VDEVS); + goto unlock; + } + + ret = ath12k_mac_vdev_create(ar, arvif); + if (ret) { + ath12k_warn(ab, "failed to create vdev %pM ret %d", vif->addr, ret); + goto unlock; + } + +flush: + /* If the vdev is created during channel assign and not during + * add_interface(), Apply any parameters for the vdev which were received + * after add_interface, corresponding to this vif. 
+ */ + ath12k_mac_vif_cache_flush(ar, arvif); +unlock: + return arvif->ar; +} + +static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif; + int i; + + lockdep_assert_wiphy(hw->wiphy); + + memset(ahvif, 0, sizeof(*ahvif)); + + ahvif->ah = ah; + ahvif->vif = vif; + arvif = &ahvif->deflink; + arvif->ahvif = ahvif; + + INIT_LIST_HEAD(&arvif->list); + INIT_DELAYED_WORK(&arvif->connection_loss_work, + ath12k_mac_vif_sta_connection_loss_work); + + for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { + arvif->bitrate_mask.control[i].legacy = 0xffffffff; + memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, + sizeof(arvif->bitrate_mask.control[i].ht_mcs)); + memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, + sizeof(arvif->bitrate_mask.control[i].vht_mcs)); + } + + /* Allocate Default Queue now and reassign during actual vdev create */ + vif->cab_queue = ATH12K_HW_DEFAULT_QUEUE; + for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++) + vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE; + + vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; + /* Defer vdev creation until assign_chanctx or hw_scan is initiated as driver + * will not know if this interface is an ML vif at this point. + */ + return 0; +} + static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif) { struct ath12k_tx_desc_info *tx_desc_info; @@ -5565,26 +8258,15 @@ static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif } } -static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +static int ath12k_mac_vdev_delete(struct ath12k *ar, struct ath12k_link_vif *arvif) { - struct ath12k *ar = hw->priv; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); struct ath12k_base *ab = ar->ab; unsigned long time_left; int ret; - mutex_lock(&ar->conf_mutex); - - ath12k_dbg(ab, ATH12K_DBG_MAC, "mac remove interface (vdev %d)\n", - arvif->vdev_id); - - if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { - ret = ath12k_peer_delete(ar, arvif->vdev_id, vif->addr); - if (ret) - ath12k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d\n", - arvif->vdev_id, ret); - } + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); reinit_completion(&ar->vdev_delete_done); @@ -5602,18 +8284,17 @@ static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw, goto err_vdev_del; } - if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { + ab->free_vdev_map |= 1LL << arvif->vdev_id; + ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id); + ar->num_created_vdevs--; + + if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { ar->monitor_vdev_id = -1; ar->monitor_vdev_created = false; } else if (ar->monitor_vdev_created && !ar->monitor_started) { ret = ath12k_mac_monitor_vdev_delete(ar); } - ab->free_vdev_map |= 1LL << (arvif->vdev_id); - ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id); - ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id); - ar->num_created_vdevs--; - ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n", vif->addr, arvif->vdev_id); @@ -5623,6 +8304,7 @@ err_vdev_del: spin_unlock_bh(&ar->data_lock); ath12k_peer_cleanup(ar, arvif->vdev_id); + ath12k_ahvif_put_link_cache(ahvif, arvif->link_id); idr_for_each(&ar->txmgmt_idr, ath12k_mac_vif_txmgmt_idr_remove, vif); @@ -5632,11 +8314,61 @@ 
err_vdev_del: /* Recalc txpower for remaining vdev */ ath12k_mac_txpower_recalc(ar); - clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags); /* TODO: recal traffic pause state based on the available vdevs */ + arvif->is_created = false; + arvif->ar = NULL; + + return ret; +} + +static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif; + struct ath12k *ar; + u8 link_id; + + lockdep_assert_wiphy(hw->wiphy); + + for (link_id = 0; link_id < ATH12K_NUM_MAX_LINKS; link_id++) { + /* if we cached some config but never received assign chanctx, + * free the allocated cache. + */ + ath12k_ahvif_put_link_cache(ahvif, link_id); + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); + if (!arvif || !arvif->is_created) + continue; - mutex_unlock(&ar->conf_mutex); + ar = arvif->ar; + + /* Scan abortion is in progress since before this, cancel_hw_scan() + * is expected to be executed. Since link is anyways going to be removed + * now, just cancel the worker and send the scan aborted to user space + */ + if (ar->scan.arvif == arvif) { + wiphy_work_cancel(hw->wiphy, &ar->scan.vdev_clean_wk); + + spin_lock_bh(&ar->data_lock); + ar->scan.arvif = NULL; + if (!ar->scan.is_roc) { + struct cfg80211_scan_info info = { + .aborted = true, + }; + + ieee80211_scan_completed(ar->ah->hw, &info); + } + + ar->scan.state = ATH12K_SCAN_IDLE; + ar->scan_channel = NULL; + ar->scan.roc_freq = 0; + spin_unlock_bh(&ar->data_lock); + } + + ath12k_mac_remove_link_interface(hw, arvif); + ath12k_mac_unassign_link_vif(arvif); + } } /* FIXME: Has to be verified. */ @@ -5649,82 +8381,103 @@ err_vdev_del: FIF_PROBE_REQ | \ FIF_FCSFAIL) -static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw, - unsigned int changed_flags, - unsigned int *total_flags, - u64 multicast) +static void ath12k_mac_configure_filter(struct ath12k *ar, + unsigned int total_flags) { - struct ath12k *ar = hw->priv; bool reset_flag; int ret; - mutex_lock(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); - changed_flags &= SUPPORTED_FILTERS; - *total_flags &= SUPPORTED_FILTERS; - ar->filter_flags = *total_flags; + ar->filter_flags = total_flags; /* For monitor mode */ reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC); ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag); - if (!ret) { - if (!reset_flag) - set_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags); - else - clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags); - } else { + if (ret) ath12k_warn(ar->ab, "fail to set monitor filter: %d\n", ret); - } + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, - "changed_flags:0x%x, total_flags:0x%x, reset_flag:%d\n", - changed_flags, *total_flags, reset_flag); + "total_flags:0x%x, reset_flag:%d\n", + total_flags, reset_flag); +} - mutex_unlock(&ar->conf_mutex); +static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k *ar; + + lockdep_assert_wiphy(hw->wiphy); + + ar = ath12k_ah_to_ar(ah, 0); + + *total_flags &= SUPPORTED_FILTERS; + ath12k_mac_configure_filter(ar, *total_flags); } static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) { - struct ath12k *ar = hw->priv; + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + int antennas_rx = 0, antennas_tx = 0; + struct ath12k *ar; + int i; - 
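remove_interface() now tears down one vdev per created link instead of assuming a single vdev per vif. The per-link walk is just an iteration over the set bits of links_map; a standalone restatement (the 16-link bound stands in for IEEE80211_MLD_MAX_NUM_LINKS and is an assumption here):

#include <stdint.h>

#define MAX_LINKS 16

static void teardown_links(unsigned long links_map,
			   void (*teardown)(uint8_t link_id))
{
	uint8_t link_id;

	for (link_id = 0; link_id < MAX_LINKS; link_id++) {
		if (!(links_map & (1UL << link_id)))
			continue;	/* link was never created */
		teardown(link_id);
	}
}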
mutex_lock(&ar->conf_mutex); + lockdep_assert_wiphy(hw->wiphy); - *tx_ant = ar->cfg_tx_chainmask; - *rx_ant = ar->cfg_rx_chainmask; + for_each_ar(ah, ar, i) { + antennas_rx = max_t(u32, antennas_rx, ar->cfg_rx_chainmask); + antennas_tx = max_t(u32, antennas_tx, ar->cfg_tx_chainmask); + } - mutex_unlock(&ar->conf_mutex); + *tx_ant = antennas_tx; + *rx_ant = antennas_rx; return 0; } static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) { - struct ath12k *ar = hw->priv; - int ret; + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k *ar; + int ret = 0; + int i; + + lockdep_assert_wiphy(hw->wiphy); - mutex_lock(&ar->conf_mutex); - ret = __ath12k_set_antenna(ar, tx_ant, rx_ant); - mutex_unlock(&ar->conf_mutex); + for_each_ar(ah, ar, i) { + ret = __ath12k_set_antenna(ar, tx_ant, rx_ant); + if (ret) + break; + } return ret; } -static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_ampdu_params *params) +static int ath12k_mac_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params, + u8 link_id) { - struct ath12k *ar = hw->priv; + struct ath12k *ar; int ret = -EINVAL; - mutex_lock(&ar->conf_mutex); + lockdep_assert_wiphy(hw->wiphy); + + ar = ath12k_get_ar_by_vif(hw, vif, link_id); + if (!ar) + return -EINVAL; switch (params->action) { case IEEE80211_AMPDU_RX_START: - ret = ath12k_dp_rx_ampdu_start(ar, params); + ret = ath12k_dp_rx_ampdu_start(ar, params, link_id); break; case IEEE80211_AMPDU_RX_STOP: - ret = ath12k_dp_rx_ampdu_stop(ar, params); + ret = ath12k_dp_rx_ampdu_stop(ar, params, link_id); break; case IEEE80211_AMPDU_TX_START: case IEEE80211_AMPDU_TX_STOP_CONT: @@ -5738,23 +8491,55 @@ static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw, break; } - mutex_unlock(&ar->conf_mutex); + if (ret) + ath12k_warn(ar->ab, "unable to perform ampdu action %d for vif %pM link %u ret %d\n", + params->action, vif->addr, link_id, ret); return ret; } +static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params) +{ + struct ieee80211_sta *sta = params->sta; + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); + unsigned long links_map = ahsta->links_map; + int ret = -EINVAL; + u8 link_id; + + lockdep_assert_wiphy(hw->wiphy); + + if (WARN_ON(!links_map)) + return ret; + + for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) { + ret = ath12k_mac_ampdu_action(hw, vif, params, link_id); + if (ret) + return ret; + } + + return 0; +} + static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { - struct ath12k *ar = hw->priv; - struct ath12k_base *ab = ar->ab; + struct ath12k *ar; + struct ath12k_base *ab; + + lockdep_assert_wiphy(hw->wiphy); + + ar = ath12k_get_ar_by_ctx(hw, ctx); + if (!ar) + return -EINVAL; + + ab = ar->ab; ath12k_dbg(ab, ATH12K_DBG_MAC, - "mac chanctx add freq %u width %d ptr %pK\n", + "mac chanctx add freq %u width %d ptr %p\n", ctx->def.chan->center_freq, ctx->def.width, ctx); - mutex_lock(&ar->conf_mutex); - spin_lock_bh(&ar->data_lock); /* TODO: In case of multiple channel context, populate rx_channel from * Rx PPDU desc information. 
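With several radios under one wiphy, get_antenna() has to report a single pair of masks; the hunk above takes the maximum chainmask across radios, while set_antenna() fans one value back out to every ar. A compact model of the aggregation (the array-of-masks layout is illustrative):

static void combined_antenna(const unsigned int *tx_masks,
			     const unsigned int *rx_masks, int n_radios,
			     unsigned int *tx_ant, unsigned int *rx_ant)
{
	int i;

	*tx_ant = 0;
	*rx_ant = 0;
	for (i = 0; i < n_radios; i++) {
		if (tx_masks[i] > *tx_ant)
			*tx_ant = tx_masks[i];	/* max_t() in the driver */
		if (rx_masks[i] > *rx_ant)
			*rx_ant = rx_masks[i];
	}
}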
@@ -5762,45 +8547,173 @@ static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw, ar->rx_channel = ctx->def.chan; spin_unlock_bh(&ar->data_lock); - mutex_unlock(&ar->conf_mutex); - return 0; } static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { - struct ath12k *ar = hw->priv; - struct ath12k_base *ab = ar->ab; + struct ath12k *ar; + struct ath12k_base *ab; + + lockdep_assert_wiphy(hw->wiphy); + + ar = ath12k_get_ar_by_ctx(hw, ctx); + if (!ar) + return; + + ab = ar->ab; ath12k_dbg(ab, ATH12K_DBG_MAC, - "mac chanctx remove freq %u width %d ptr %pK\n", + "mac chanctx remove freq %u width %d ptr %p\n", ctx->def.chan->center_freq, ctx->def.width, ctx); - mutex_lock(&ar->conf_mutex); - spin_lock_bh(&ar->data_lock); /* TODO: In case of there is one more channel context left, populate * rx_channel with the channel of that remaining channel context. */ ar->rx_channel = NULL; spin_unlock_bh(&ar->data_lock); +} + +static enum wmi_phy_mode +ath12k_mac_check_down_grade_phy_mode(struct ath12k *ar, + enum wmi_phy_mode mode, + enum nl80211_band band, + enum nl80211_iftype type) +{ + struct ieee80211_sta_eht_cap *eht_cap = NULL; + enum wmi_phy_mode down_mode; + int n = ar->mac.sbands[band].n_iftype_data; + int i; + struct ieee80211_sband_iftype_data *data; + + if (mode < MODE_11BE_EHT20) + return mode; + + data = ar->mac.iftype[band]; + for (i = 0; i < n; i++) { + if (data[i].types_mask & BIT(type)) { + eht_cap = &data[i].eht_cap; + break; + } + } - mutex_unlock(&ar->conf_mutex); + if (eht_cap && eht_cap->has_eht) + return mode; + + switch (mode) { + case MODE_11BE_EHT20: + down_mode = MODE_11AX_HE20; + break; + case MODE_11BE_EHT40: + down_mode = MODE_11AX_HE40; + break; + case MODE_11BE_EHT80: + down_mode = MODE_11AX_HE80; + break; + case MODE_11BE_EHT80_80: + down_mode = MODE_11AX_HE80_80; + break; + case MODE_11BE_EHT160: + case MODE_11BE_EHT160_160: + case MODE_11BE_EHT320: + down_mode = MODE_11AX_HE160; + break; + case MODE_11BE_EHT20_2G: + down_mode = MODE_11AX_HE20_2G; + break; + case MODE_11BE_EHT40_2G: + down_mode = MODE_11AX_HE40_2G; + break; + default: + down_mode = mode; + break; + } + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, + "mac vdev start phymode %s downgrade to %s\n", + ath12k_mac_phymode_str(mode), + ath12k_mac_phymode_str(down_mode)); + + return down_mode; +} + +static void +ath12k_mac_mlo_get_vdev_args(struct ath12k_link_vif *arvif, + struct wmi_ml_arg *ml_arg) +{ + struct ath12k_vif *ahvif = arvif->ahvif; + struct wmi_ml_partner_info *partner_info; + struct ieee80211_bss_conf *link_conf; + struct ath12k_link_vif *arvif_p; + unsigned long links; + u8 link_id; + + lockdep_assert_wiphy(ahvif->ah->hw->wiphy); + + if (!ath12k_mac_is_ml_arvif(arvif)) + return; + + if (hweight16(ahvif->vif->valid_links) > ATH12K_WMI_MLO_MAX_LINKS) + return; + + ml_arg->enabled = true; + + /* Driver always add a new link via VDEV START, FW takes + * care of internally adding this link to existing + * link vdevs which are advertised as partners below + */ + ml_arg->link_add = true; + partner_info = ml_arg->partner_info; + + links = ahvif->links_map; + for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { + arvif_p = wiphy_dereference(ahvif->ah->hw->wiphy, ahvif->link[link_id]); + + if (WARN_ON(!arvif_p)) + continue; + + if (arvif == arvif_p) + continue; + + link_conf = wiphy_dereference(ahvif->ah->hw->wiphy, + ahvif->vif->link_conf[arvif_p->link_id]); + + if (!link_conf) + continue; + + partner_info->vdev_id = arvif_p->vdev_id; + 
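The phymode downgrade earlier in this hunk exists because a vdev must not start in a MODE_11BE_* phymode when the interface type has no EHT capability; each EHT width falls back to its widest HE equivalent, and 320 MHz drops to HE160 since HE tops out at 160 MHz. The switch is equivalent to a small lookup table (symbolic values here, not the WMI enum):

enum { EHT20, EHT40, EHT80, EHT160, EHT320, HE20, HE40, HE80, HE160 };

static const int eht_to_he_fallback[] = {
	[EHT20]  = HE20,
	[EHT40]  = HE40,
	[EHT80]  = HE80,
	[EHT160] = HE160,
	[EHT320] = HE160,	/* no 320 MHz phymode exists in HE */
};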
partner_info->hw_link_id = arvif_p->ar->pdev->hw_link_id; + ether_addr_copy(partner_info->addr, link_conf->addr); + ml_arg->num_partner_links++; + partner_info++; + } } static int -ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif, - const struct cfg80211_chan_def *chandef, +ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif, + struct ieee80211_chanctx_conf *ctx, bool restart) { struct ath12k *ar = arvif->ar; struct ath12k_base *ab = ar->ab; struct wmi_vdev_start_req_arg arg = {}; - int he_support = arvif->vif->bss_conf.he_support; + const struct cfg80211_chan_def *chandef = &ctx->def; + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_bss_conf *link_conf; + unsigned int dfs_cac_time; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(hw->wiphy); + + link_conf = ath12k_mac_get_link_bss_conf(arvif); + if (!link_conf) { + ath12k_warn(ar->ab, "unable to access bss link conf in vdev start for vif %pM link %u\n", + ahvif->vif->addr, arvif->link_id); + return -ENOLINK; + } reinit_completion(&ar->vdev_setup_done); @@ -5814,22 +8727,38 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif, arg.band_center_freq2 = chandef->center_freq2; arg.mode = ath12k_phymodes[chandef->chan->band][chandef->width]; + arg.mode = ath12k_mac_check_down_grade_phy_mode(ar, arg.mode, + chandef->chan->band, + ahvif->vif->type); arg.min_power = 0; - arg.max_power = chandef->chan->max_power * 2; - arg.max_reg_power = chandef->chan->max_reg_power * 2; - arg.max_antenna_gain = chandef->chan->max_antenna_gain * 2; + arg.max_power = chandef->chan->max_power; + arg.max_reg_power = chandef->chan->max_reg_power; + arg.max_antenna_gain = chandef->chan->max_antenna_gain; arg.pref_tx_streams = ar->num_tx_chains; arg.pref_rx_streams = ar->num_rx_chains; - if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { - arg.ssid = arvif->u.ap.ssid; - arg.ssid_len = arvif->u.ap.ssid_len; - arg.hidden_ssid = arvif->u.ap.hidden_ssid; + arg.mbssid_flags = WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP; + arg.mbssid_tx_vdev_id = 0; + if (test_bit(WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT, + ar->ab->wmi_ab.svc_map)) { + ret = ath12k_mac_setup_vdev_params_mbssid(arvif, + &arg.mbssid_flags, + &arg.mbssid_tx_vdev_id); + if (ret) + return ret; + } + + if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) { + arg.ssid = ahvif->u.ap.ssid; + arg.ssid_len = ahvif->u.ap.ssid_len; + arg.hidden_ssid = ahvif->u.ap.hidden_ssid; /* For now allow DFS for AP mode */ arg.chan_radar = !!(chandef->chan->flags & IEEE80211_CHAN_RADAR); + arg.freq2_radar = ctx->radar_enabled; + arg.passive = arg.chan_radar; spin_lock_bh(&ab->base_lock); @@ -5837,7 +8766,7 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif, spin_unlock_bh(&ab->base_lock); /* TODO: Notify if secondary 80Mhz also needs radar detection */ - if (he_support) { + if (link_conf->he_support) { ret = ath12k_set_he_mu_sounding_mode(ar, arvif); if (ret) { ath12k_warn(ar->ab, "failed to set he mode vdev %i\n", @@ -5849,6 +8778,9 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif, arg.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR); + if (!restart) + ath12k_mac_mlo_get_vdev_args(arvif, &arg.ml); + ath12k_dbg(ab, ATH12K_DBG_MAC, "mac vdev %d start center_freq %d phymode %s punct_bitmap 0x%x\n", arg.vdev_id, arg.freq, @@ -5870,22 +8802,22 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif, ar->num_started_vdevs++; ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM started, vdev_id %d\n", - arvif->vif->addr, arvif->vdev_id); + 
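ath12k_mac_mlo_get_vdev_args() above advertises every other already-created link of the MLD as a partner inside the new link's VDEV START, so firmware can stitch the link vdevs together internally. Stripped to its core, the fill loop looks like this (the struct and the 16-link bound are loose stand-ins for wmi_ml_partner_info and the driver's limits):

struct partner {
	int vdev_id;
	int hw_link_id;
};

static int fill_partner_links(unsigned long links_map, int self_link,
			      const int *vdev_ids, const int *hw_link_ids,
			      struct partner *out, int max_partners)
{
	int n = 0, link;

	for (link = 0; link < 16 && n < max_partners; link++) {
		if (!(links_map & (1UL << link)) || link == self_link)
			continue;	/* skip absent links and ourselves */
		out[n].vdev_id = vdev_ids[link];
		out[n].hw_link_id = hw_link_ids[link];
		n++;
	}
	return n;	/* becomes ml_arg->num_partner_links */
}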
ahvif->vif->addr, arvif->vdev_id); - /* Enable CAC Flag in the driver by checking the channel DFS cac time, - * i.e dfs_cac_ms value which will be valid only for radar channels - * and state as NL80211_DFS_USABLE which indicates CAC needs to be - * done before channel usage. This flags is used to drop rx packets. + /* Enable CAC Running Flag in the driver by checking all sub-channel's DFS + * state as NL80211_DFS_USABLE which indicates CAC needs to be + * done before channel usage. This flag is used to drop rx packets. * during CAC. */ /* TODO: Set the flag for other interface types as required */ - if (arvif->vdev_type == WMI_VDEV_TYPE_AP && - chandef->chan->dfs_cac_ms && - chandef->chan->dfs_state == NL80211_DFS_USABLE) { - set_bit(ATH12K_CAC_RUNNING, &ar->dev_flags); + if (arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP && ctx->radar_enabled && + cfg80211_chandef_dfs_usable(hw->wiphy, chandef)) { + set_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags); + dfs_cac_time = cfg80211_chandef_dfs_cac_time(hw->wiphy, chandef); + ath12k_dbg(ab, ATH12K_DBG_MAC, - "CAC Started in chan_freq %d for vdev %d\n", - arg.freq, arg.vdev_id); + "CAC started dfs_cac_time %u center_freq %d center_freq1 %d for vdev %d\n", + dfs_cac_time, arg.freq, arg.band_center_freq1, arg.vdev_id); } ret = ath12k_mac_set_txbf_conf(arvif); @@ -5896,56 +8828,16 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif, return 0; } -static int ath12k_mac_vdev_stop(struct ath12k_vif *arvif) -{ - struct ath12k *ar = arvif->ar; - int ret; - - lockdep_assert_held(&ar->conf_mutex); - - reinit_completion(&ar->vdev_setup_done); - - ret = ath12k_wmi_vdev_stop(ar, arvif->vdev_id); - if (ret) { - ath12k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n", - arvif->vdev_id, ret); - goto err; - } - - ret = ath12k_mac_vdev_setup_sync(ar); - if (ret) { - ath12k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n", - arvif->vdev_id, ret); - goto err; - } - - WARN_ON(ar->num_started_vdevs == 0); - - ar->num_started_vdevs--; - ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n", - arvif->vif->addr, arvif->vdev_id); - - if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) { - clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags); - ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n", - arvif->vdev_id); - } - - return 0; -err: - return ret; -} - -static int ath12k_mac_vdev_start(struct ath12k_vif *arvif, - const struct cfg80211_chan_def *chandef) +static int ath12k_mac_vdev_start(struct ath12k_link_vif *arvif, + struct ieee80211_chanctx_conf *ctx) { - return ath12k_mac_vdev_start_restart(arvif, chandef, false); + return ath12k_mac_vdev_start_restart(arvif, ctx, false); } -static int ath12k_mac_vdev_restart(struct ath12k_vif *arvif, - const struct cfg80211_chan_def *chandef) +static int ath12k_mac_vdev_restart(struct ath12k_link_vif *arvif, + struct ieee80211_chanctx_conf *ctx) { - return ath12k_mac_vdev_start_restart(arvif, chandef, true); + return ath12k_mac_vdev_start_restart(arvif, ctx, true); } struct ath12k_mac_change_chanctx_arg { @@ -5953,38 +8845,135 @@ struct ath12k_mac_change_chanctx_arg { struct ieee80211_vif_chanctx_switch *vifs; int n_vifs; int next_vif; + struct ath12k *ar; }; static void ath12k_mac_change_chanctx_cnt_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); struct ath12k_mac_change_chanctx_arg *arg = data; + struct ieee80211_bss_conf *link_conf; + struct ath12k_link_vif *arvif; + unsigned long links_map; + u8 link_id; - if 
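The reworked CAC check above keys off the whole chandef via the cfg80211 helpers rather than a single channel's dfs_state, which matters for wide channels whose sub-channels differ. The gating condition reduces to roughly the following (chandef_dfs_usable and chandef_cac_ms are precomputed stand-ins for cfg80211_chandef_dfs_usable()/_cac_time()):

static int cac_required(int is_ap, int radar_enabled,
			int chandef_dfs_usable, unsigned int chandef_cac_ms,
			unsigned int *cac_ms_out)
{
	if (!is_ap || !radar_enabled || !chandef_dfs_usable)
		return 0;

	*cac_ms_out = chandef_cac_ms;	/* rx frames are dropped for this long */
	return 1;			/* set the CAC_RUNNING flag */
}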
(rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx) - return; + lockdep_assert_wiphy(ahvif->ah->hw->wiphy); - arg->n_vifs++; + links_map = ahvif->links_map; + for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) { + arvif = wiphy_dereference(ahvif->ah->hw->wiphy, ahvif->link[link_id]); + if (WARN_ON(!arvif)) + continue; + + if (arvif->ar != arg->ar) + continue; + + link_conf = wiphy_dereference(ahvif->ah->hw->wiphy, + vif->link_conf[link_id]); + if (WARN_ON(!link_conf)) + continue; + + if (rcu_access_pointer(link_conf->chanctx_conf) != arg->ctx) + continue; + + arg->n_vifs++; + } } static void ath12k_mac_change_chanctx_fill_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); struct ath12k_mac_change_chanctx_arg *arg = data; + struct ieee80211_bss_conf *link_conf; struct ieee80211_chanctx_conf *ctx; + struct ath12k_link_vif *arvif; + unsigned long links_map; + u8 link_id; - ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf); - if (ctx != arg->ctx) - return; + lockdep_assert_wiphy(ahvif->ah->hw->wiphy); - if (WARN_ON(arg->next_vif == arg->n_vifs)) - return; + links_map = ahvif->links_map; + for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) { + arvif = wiphy_dereference(ahvif->ah->hw->wiphy, ahvif->link[link_id]); + if (WARN_ON(!arvif)) + continue; - arg->vifs[arg->next_vif].vif = vif; - arg->vifs[arg->next_vif].old_ctx = ctx; - arg->vifs[arg->next_vif].new_ctx = ctx; - arg->next_vif++; + if (arvif->ar != arg->ar) + continue; + + link_conf = wiphy_dereference(ahvif->ah->hw->wiphy, + vif->link_conf[arvif->link_id]); + if (WARN_ON(!link_conf)) + continue; + + ctx = rcu_access_pointer(link_conf->chanctx_conf); + if (ctx != arg->ctx) + continue; + + if (WARN_ON(arg->next_vif == arg->n_vifs)) + return; + + arg->vifs[arg->next_vif].vif = vif; + arg->vifs[arg->next_vif].old_ctx = ctx; + arg->vifs[arg->next_vif].new_ctx = ctx; + arg->vifs[arg->next_vif].link_conf = link_conf; + arg->next_vif++; + } +} + +static u32 ath12k_mac_nlwidth_to_wmiwidth(enum nl80211_chan_width width) +{ + switch (width) { + case NL80211_CHAN_WIDTH_20: + return WMI_CHAN_WIDTH_20; + case NL80211_CHAN_WIDTH_40: + return WMI_CHAN_WIDTH_40; + case NL80211_CHAN_WIDTH_80: + return WMI_CHAN_WIDTH_80; + case NL80211_CHAN_WIDTH_160: + return WMI_CHAN_WIDTH_160; + case NL80211_CHAN_WIDTH_80P80: + return WMI_CHAN_WIDTH_80P80; + case NL80211_CHAN_WIDTH_5: + return WMI_CHAN_WIDTH_5; + case NL80211_CHAN_WIDTH_10: + return WMI_CHAN_WIDTH_10; + case NL80211_CHAN_WIDTH_320: + return WMI_CHAN_WIDTH_320; + default: + WARN_ON(1); + return WMI_CHAN_WIDTH_20; + } +} + +static int ath12k_mac_update_peer_puncturing_width(struct ath12k *ar, + struct ath12k_link_vif *arvif, + struct cfg80211_chan_def def) +{ + u32 param_id, param_value; + int ret; + + if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA) + return 0; + + param_id = WMI_PEER_CHWIDTH_PUNCTURE_20MHZ_BITMAP; + param_value = ath12k_mac_nlwidth_to_wmiwidth(def.width) | + u32_encode_bits((~def.punctured), + WMI_PEER_PUNCTURE_BITMAP); + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, + "punctured bitmap %02x width %d vdev %d\n", + def.punctured, def.width, arvif->vdev_id); + + ret = ath12k_wmi_set_peer_param(ar, arvif->bssid, + arvif->vdev_id, param_id, + param_value); + + return ret; } static void @@ -5992,18 +8981,28 @@ ath12k_mac_update_vif_chan(struct ath12k *ar, struct ieee80211_vif_chanctx_switch *vifs, int n_vifs) { + struct ath12k_wmi_vdev_up_params params = {}; + struct ieee80211_bss_conf *link_conf; 
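WMI_PEER_CHWIDTH_PUNCTURE_20MHZ_BITMAP above packs two things into one u32: the WMI channel width and the inverse of cfg80211's punctured bitmap, since firmware wants the usable 20 MHz sub-channels while cfg80211 tracks the punctured ones. A standalone encoder under the assumption that the bitmap field sits in the upper half; the real field position comes from the WMI_PEER_PUNCTURE_BITMAP mask and may differ:

#include <stdint.h>

#define PUNCTURE_SHIFT	16	/* assumed field position */
#define PUNCTURE_MASK	0xffffU

static uint32_t encode_puncture_param(uint32_t wmi_width, uint16_t punctured)
{
	uint32_t usable = (uint32_t)~punctured & PUNCTURE_MASK;

	return wmi_width | (usable << PUNCTURE_SHIFT);
}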
struct ath12k_base *ab = ar->ab; - struct ath12k_vif *arvif; + struct ath12k_link_vif *arvif; + struct ieee80211_vif *vif; + struct ath12k_vif *ahvif; + u8 link_id; int ret; int i; bool monitor_vif = false; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); for (i = 0; i < n_vifs; i++) { - arvif = (void *)vifs[i].vif->drv_priv; - - if (vifs[i].vif->type == NL80211_IFTYPE_MONITOR) + vif = vifs[i].vif; + ahvif = ath12k_vif_to_ahvif(vif); + link_conf = vifs[i].link_conf; + link_id = link_conf->link_id; + arvif = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy, + ahvif->link[link_id]); + + if (vif->type == NL80211_IFTYPE_MONITOR) monitor_vif = true; ath12k_dbg(ab, ATH12K_DBG_MAC, @@ -6017,36 +9016,30 @@ ath12k_mac_update_vif_chan(struct ath12k *ar, if (WARN_ON(!arvif->is_started)) continue; - if (WARN_ON(!arvif->is_up)) - continue; - - ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id); - if (ret) { - ath12k_warn(ab, "failed to down vdev %d: %d\n", - arvif->vdev_id, ret); - continue; - } - } - - /* All relevant vdevs are downed and associated channel resources - * should be available for the channel switch now. - */ + arvif->punct_bitmap = vifs[i].new_ctx->def.punctured; - /* TODO: Update ar->rx_channel */ - - for (i = 0; i < n_vifs; i++) { - arvif = (void *)vifs[i].vif->drv_priv; - - if (WARN_ON(!arvif->is_started)) - continue; - - if (WARN_ON(!arvif->is_up)) - continue; + /* Firmware expect vdev_restart only if vdev is up. + * If vdev is down then it expect vdev_stop->vdev_start. + */ + if (arvif->is_up) { + ret = ath12k_mac_vdev_restart(arvif, vifs[i].new_ctx); + if (ret) { + ath12k_warn(ab, "failed to restart vdev %d: %d\n", + arvif->vdev_id, ret); + continue; + } + } else { + ret = ath12k_mac_vdev_stop(arvif); + if (ret) { + ath12k_warn(ab, "failed to stop vdev %d: %d\n", + arvif->vdev_id, ret); + continue; + } - ret = ath12k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def); - if (ret) { - ath12k_warn(ab, "failed to restart vdev %d: %d\n", - arvif->vdev_id, ret); + ret = ath12k_mac_vdev_start(arvif, vifs[i].new_ctx); + if (ret) + ath12k_warn(ab, "failed to start vdev %d: %d\n", + arvif->vdev_id, ret); continue; } @@ -6055,13 +9048,35 @@ ath12k_mac_update_vif_chan(struct ath12k *ar, ath12k_warn(ab, "failed to update bcn tmpl during csa: %d\n", ret); - ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, - arvif->bssid); + memset(¶ms, 0, sizeof(params)); + params.vdev_id = arvif->vdev_id; + params.aid = ahvif->aid; + params.bssid = arvif->bssid; + if (vif->mbssid_tx_vif) { + struct ath12k_vif *tx_ahvif = + ath12k_vif_to_ahvif(vif->mbssid_tx_vif); + struct ath12k_link_vif *tx_arvif = &tx_ahvif->deflink; + + params.tx_bssid = tx_arvif->bssid; + params.nontx_profile_idx = link_conf->bssid_index; + params.nontx_profile_cnt = 1 << link_conf->bssid_indicator; + } + ret = ath12k_wmi_vdev_up(arvif->ar, ¶ms); if (ret) { ath12k_warn(ab, "failed to bring vdev up %d: %d\n", arvif->vdev_id, ret); continue; } + + ret = ath12k_mac_update_peer_puncturing_width(arvif->ar, arvif, + vifs[i].new_ctx->def); + if (ret) { + ath12k_warn(ar->ab, + "failed to update puncturing bitmap %02x and width %d: %d\n", + vifs[i].new_ctx->def.punctured, + vifs[i].new_ctx->def.width, ret); + continue; + } } /* Restart the internal monitor vdev on new channel */ @@ -6075,11 +9090,12 @@ static void ath12k_mac_update_active_vif_chan(struct ath12k *ar, struct ieee80211_chanctx_conf *ctx) { - struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx }; + struct ath12k_mac_change_chanctx_arg 
arg = { .ctx = ctx, .ar = ar }; + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); - ieee80211_iterate_active_interfaces_atomic(ar->hw, + ieee80211_iterate_active_interfaces_atomic(hw, IEEE80211_IFACE_ITER_NORMAL, ath12k_mac_change_chanctx_cnt_iter, &arg); @@ -6090,7 +9106,7 @@ ath12k_mac_update_active_vif_chan(struct ath12k *ar, if (!arg.vifs) return; - ieee80211_iterate_active_interfaces_atomic(ar->hw, + ieee80211_iterate_active_interfaces_atomic(hw, IEEE80211_IFACE_ITER_NORMAL, ath12k_mac_change_chanctx_fill_iter, &arg); @@ -6104,42 +9120,47 @@ static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx, u32 changed) { - struct ath12k *ar = hw->priv; - struct ath12k_base *ab = ar->ab; + struct ath12k *ar; + struct ath12k_base *ab; + + lockdep_assert_wiphy(hw->wiphy); - mutex_lock(&ar->conf_mutex); + ar = ath12k_get_ar_by_ctx(hw, ctx); + if (!ar) + return; + + ab = ar->ab; ath12k_dbg(ab, ATH12K_DBG_MAC, - "mac chanctx change freq %u width %d ptr %pK changed %x\n", + "mac chanctx change freq %u width %d ptr %p changed %x\n", ctx->def.chan->center_freq, ctx->def.width, ctx, changed); /* This shouldn't really happen because channel switching should use * switch_vif_chanctx(). */ if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL)) - goto unlock; + return; - if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) + if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH || + changed & IEEE80211_CHANCTX_CHANGE_RADAR || + changed & IEEE80211_CHANCTX_CHANGE_PUNCTURING) ath12k_mac_update_active_vif_chan(ar, ctx); /* TODO: Recalc radar detection */ - -unlock: - mutex_unlock(&ar->conf_mutex); } -static int ath12k_start_vdev_delay(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +static int ath12k_start_vdev_delay(struct ath12k *ar, + struct ath12k_link_vif *arvif) { - struct ath12k *ar = hw->priv; struct ath12k_base *ab = ar->ab; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); int ret; if (WARN_ON(arvif->is_started)) return -EBUSY; - ret = ath12k_mac_vdev_start(arvif, &arvif->chanctx.def); + ret = ath12k_mac_vdev_start(arvif, &arvif->chanctx); if (ret) { ath12k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n", arvif->vdev_id, vif->addr, @@ -6147,7 +9168,7 @@ static int ath12k_start_vdev_delay(struct ieee80211_hw *hw, return ret; } - if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { + if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { ret = ath12k_monitor_vdev_up(ar, arvif->vdev_id); if (ret) { ath12k_warn(ab, "failed put monitor up: %d\n", ret); @@ -6167,24 +9188,44 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx) { - struct ath12k *ar = hw->priv; - struct ath12k_base *ab = ar->ab; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k *ar; + struct ath12k_base *ab; + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + u8 link_id = link_conf->link_id; + struct ath12k_link_vif *arvif; int ret; - struct ath12k_wmi_peer_create_arg param; - mutex_lock(&ar->conf_mutex); + lockdep_assert_wiphy(hw->wiphy); + + /* For multi radio wiphy, the vdev was not created during add_interface + * create now since we have a channel ctx now to assign to a specific ar/fw + */ + arvif = ath12k_mac_assign_link_vif(ah, vif, link_id); + if 
(!arvif) { + WARN_ON(1); + return -ENOMEM; + } + + ar = ath12k_mac_assign_vif_to_vdev(hw, arvif, ctx); + if (!ar) { + ath12k_warn(arvif->ar->ab, "failed to assign chanctx for vif %pM link id %u link vif is already started", + vif->addr, link_id); + return -EINVAL; + } + + ab = ar->ab; ath12k_dbg(ab, ATH12K_DBG_MAC, - "mac chanctx assign ptr %pK vdev_id %i\n", + "mac chanctx assign ptr %p vdev_id %i\n", ctx, arvif->vdev_id); - arvif->punct_bitmap = link_conf->eht_puncturing; + arvif->punct_bitmap = ctx->def.punctured; /* for some targets bss peer must be created before vdev_start */ if (ab->hw_params->vdev_start_delay && - arvif->vdev_type != WMI_VDEV_TYPE_AP && - arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && + ahvif->vdev_type != WMI_VDEV_TYPE_AP && + ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR && !ath12k_peer_exist_by_vdev_id(ab, arvif->vdev_id)) { memcpy(&arvif->chanctx, ctx, sizeof(*ctx)); ret = 0; @@ -6196,30 +9237,16 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, goto out; } - if (ab->hw_params->vdev_start_delay && - (arvif->vdev_type == WMI_VDEV_TYPE_AP || - arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)) { - param.vdev_id = arvif->vdev_id; - param.peer_type = WMI_PEER_TYPE_DEFAULT; - param.peer_addr = ar->mac_addr; - - ret = ath12k_peer_create(ar, arvif, NULL, ¶m); - if (ret) { - ath12k_warn(ab, "failed to create peer after vdev start delay: %d", - ret); - goto out; - } - } - - if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { + if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { ret = ath12k_mac_monitor_start(ar); if (ret) goto out; + arvif->is_started = true; goto out; } - ret = ath12k_mac_vdev_start(arvif, &ctx->def); + ret = ath12k_mac_vdev_start(arvif, ctx); if (ret) { ath12k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n", arvif->vdev_id, vif->addr, @@ -6227,7 +9254,7 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, goto out; } - if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && ar->monitor_vdev_created) + if (ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR && ar->monitor_vdev_created) ath12k_mac_monitor_start(ar); arvif->is_started = true; @@ -6235,8 +9262,6 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, /* TODO: Setup ps and cts/rts protection */ out: - mutex_unlock(&ar->conf_mutex); - return ret; } @@ -6246,50 +9271,60 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx) { - struct ath12k *ar = hw->priv; - struct ath12k_base *ab = ar->ab; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k *ar; + struct ath12k_base *ab; + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif; + u8 link_id = link_conf->link_id; int ret; - mutex_lock(&ar->conf_mutex); + lockdep_assert_wiphy(hw->wiphy); + + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); + + /* The vif is expected to be attached to an ar's VDEV. 
+ * We leave the vif/vdev in this function as is + * and not delete the vdev symmetric to assign_vif_chanctx() + * the VDEV will be deleted and unassigned either during + * remove_interface() or when there is a change in channel + * that moves the vif to a new ar + */ + if (!arvif || !arvif->is_created) + return; + + ar = arvif->ar; + ab = ar->ab; ath12k_dbg(ab, ATH12K_DBG_MAC, - "mac chanctx unassign ptr %pK vdev_id %i\n", + "mac chanctx unassign ptr %p vdev_id %i\n", ctx, arvif->vdev_id); WARN_ON(!arvif->is_started); - if (ab->hw_params->vdev_start_delay && - arvif->vdev_type == WMI_VDEV_TYPE_MONITOR && - ath12k_peer_find_by_addr(ab, ar->mac_addr)) - ath12k_peer_delete(ar, arvif->vdev_id, ar->mac_addr); - - if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { + if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { ret = ath12k_mac_monitor_stop(ar); - if (ret) { - mutex_unlock(&ar->conf_mutex); + if (ret) return; - } arvif->is_started = false; } - ret = ath12k_mac_vdev_stop(arvif); - if (ret) - ath12k_warn(ab, "failed to stop vdev %i: %d\n", - arvif->vdev_id, ret); - + if (ahvif->vdev_type != WMI_VDEV_TYPE_STA && + ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR) { + ath12k_bss_disassoc(ar, arvif); + ret = ath12k_mac_vdev_stop(arvif); + if (ret) + ath12k_warn(ab, "failed to stop vdev %i: %d\n", + arvif->vdev_id, ret); + } arvif->is_started = false; - if (ab->hw_params->vdev_start_delay && - arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) - ath12k_wmi_vdev_down(ar, arvif->vdev_id); - - if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && + if (ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR && ar->num_started_vdevs == 1 && ar->monitor_vdev_created) ath12k_mac_monitor_stop(ar); - mutex_unlock(&ar->conf_mutex); + ath12k_mac_remove_link_interface(hw, arvif); + ath12k_mac_unassign_link_vif(arvif); } static int @@ -6298,27 +9333,34 @@ ath12k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw, int n_vifs, enum ieee80211_chanctx_switch_mode mode) { - struct ath12k *ar = hw->priv; + struct ath12k *ar; - mutex_lock(&ar->conf_mutex); + lockdep_assert_wiphy(hw->wiphy); + + ar = ath12k_get_ar_by_ctx(hw, vifs->old_ctx); + if (!ar) + return -EINVAL; + + /* Switching channels across radio is not allowed */ + if (ar != ath12k_get_ar_by_ctx(hw, vifs->new_ctx)) + return -EINVAL; ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac chanctx switch n_vifs %d mode %d\n", n_vifs, mode); ath12k_mac_update_vif_chan(ar, vifs, n_vifs); - mutex_unlock(&ar->conf_mutex); - return 0; } static int ath12k_set_vdev_param_to_all_vifs(struct ath12k *ar, int param, u32 value) { - struct ath12k_vif *arvif; + struct ath12k_link_vif *arvif; int ret = 0; - mutex_lock(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + list_for_each_entry(arvif, &ar->arvifs, list) { ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "setting mac vdev %d param %d value %d\n", param, arvif->vdev_id, value); @@ -6331,7 +9373,7 @@ ath12k_set_vdev_param_to_all_vifs(struct ath12k *ar, int param, u32 value) break; } } - mutex_unlock(&ar->conf_mutex); + return ret; } @@ -6340,10 +9382,27 @@ ath12k_set_vdev_param_to_all_vifs(struct ath12k *ar, int param, u32 value) */ static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) { - struct ath12k *ar = hw->priv; - int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD; + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k *ar; + int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD, ret = 0, i; + + lockdep_assert_wiphy(hw->wiphy); - return ath12k_set_vdev_param_to_all_vifs(ar, param_id, value); + /* Currently we set the rts threshold value 
to all the vifs across + * all radios of the single wiphy. + * TODO Once support for vif specific RTS threshold in mac80211 is + * available, ath12k can make use of it. + */ + for_each_ar(ah, ar, i) { + ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value); + if (ret) { + ath12k_warn(ar->ab, "failed to set RTS config for all vdevs of pdev %d", + ar->pdev->pdev_id); + break; + } + } + + return ret; } static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) @@ -6358,30 +9417,83 @@ static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) * supported. This effectively prevents mac80211 from doing frame * fragmentation in software. */ + + lockdep_assert_wiphy(hw->wiphy); + return -EOPNOTSUPP; } -static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - u32 queues, bool drop) +static int ath12k_mac_flush(struct ath12k *ar) { - struct ath12k *ar = hw->priv; long time_left; - - if (drop) - return; + int ret = 0; time_left = wait_event_timeout(ar->dp.tx_empty_waitq, (atomic_read(&ar->dp.num_tx_pending) == 0), ATH12K_FLUSH_TIMEOUT); - if (time_left == 0) - ath12k_warn(ar->ab, "failed to flush transmit queue %ld\n", time_left); + if (time_left == 0) { + ath12k_warn(ar->ab, + "failed to flush transmit queue, data pkts pending %d\n", + atomic_read(&ar->dp.num_tx_pending)); + ret = -ETIMEDOUT; + } time_left = wait_event_timeout(ar->txmgmt_empty_waitq, (atomic_read(&ar->num_pending_mgmt_tx) == 0), ATH12K_FLUSH_TIMEOUT); - if (time_left == 0) - ath12k_warn(ar->ab, "failed to flush mgmt transmit queue %ld\n", - time_left); + if (time_left == 0) { + ath12k_warn(ar->ab, + "failed to flush mgmt transmit queue, mgmt pkts pending %d\n", + atomic_read(&ar->num_pending_mgmt_tx)); + ret = -ETIMEDOUT; + } + + return ret; +} + +int ath12k_mac_wait_tx_complete(struct ath12k *ar) +{ + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + ath12k_mac_drain_tx(ar); + return ath12k_mac_flush(ar); +} + +static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) +{ + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k_link_vif *arvif; + struct ath12k_vif *ahvif; + unsigned long links; + struct ath12k *ar; + u8 link_id; + int i; + + lockdep_assert_wiphy(hw->wiphy); + + if (drop) + return; + + /* vif can be NULL when flush() is considered for hw */ + if (!vif) { + for_each_ar(ah, ar, i) + ath12k_mac_flush(ar); + return; + } + + for_each_ar(ah, ar, i) + wiphy_work_flush(hw->wiphy, &ar->wmi_mgmt_tx_work); + + ahvif = ath12k_vif_to_ahvif(vif); + links = ahvif->links_map; + for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); + if (!(arvif && arvif->ar)) + continue; + + ath12k_mac_flush(arvif->ar); + } } static int @@ -6499,14 +9611,14 @@ ath12k_mac_get_single_legacy_rate(struct ath12k *ar, return 0; } -static int ath12k_mac_set_fixed_rate_params(struct ath12k_vif *arvif, +static int ath12k_mac_set_fixed_rate_params(struct ath12k_link_vif *arvif, u32 rate, u8 nss, u8 sgi, u8 ldpc) { struct ath12k *ar = arvif->ar; u32 vdev_param; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02x nss %u sgi %u\n", arvif->vdev_id, rate, nss, sgi); @@ -6578,32 +9690,47 @@ ath12k_mac_vht_mcs_range_present(struct ath12k *ar, static void ath12k_mac_set_bitrate_mask_iter(void *data, struct ieee80211_sta *sta) { - 
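ath12k_mac_flush() earlier in this hunk waits for two counters (pending data and management tx) to drain, and now reports the residual count on timeout rather than the always-zero time_left. A userspace model of one such wait, with polling standing in for wait_event_timeout() (the 100 ms poll period is arbitrary):

#include <stdatomic.h>
#include <time.h>

static int wait_tx_drain(atomic_int *num_pending, int timeout_ms)
{
	struct timespec period = { 0, 100 * 1000 * 1000 };	/* 100 ms */

	while (timeout_ms > 0) {
		if (atomic_load(num_pending) == 0)
			return 0;
		nanosleep(&period, NULL);
		timeout_ms -= 100;
	}
	return -1;	/* caller logs atomic_load(num_pending) */
}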
struct ath12k_vif *arvif = data; - struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv; + struct ath12k_link_vif *arvif = data; + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); + struct ath12k_link_sta *arsta; struct ath12k *ar = arvif->ar; + arsta = rcu_dereference(ahsta->link[arvif->link_id]); + if (!arsta || arsta->arvif != arvif) + return; + spin_lock_bh(&ar->data_lock); arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED; spin_unlock_bh(&ar->data_lock); - ieee80211_queue_work(ar->hw, &arsta->update_wk); + wiphy_work_queue(ath12k_ar_to_hw(ar)->wiphy, &arsta->update_wk); } static void ath12k_mac_disable_peer_fixed_rate(void *data, struct ieee80211_sta *sta) { - struct ath12k_vif *arvif = data; + struct ath12k_link_vif *arvif = data; + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); + struct ath12k_link_sta *arsta; struct ath12k *ar = arvif->ar; int ret; - ret = ath12k_wmi_set_peer_param(ar, sta->addr, + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy, + ahsta->link[arvif->link_id]); + + if (!arsta || arsta->arvif != arvif) + return; + + ret = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id, WMI_PEER_PARAM_FIXED_RATE, WMI_FIXED_RATE_NONE); if (ret) ath12k_warn(ar->ab, "failed to disable peer fixed rate for STA %pM ret %d\n", - sta->addr, ret); + arsta->addr, ret); } static int @@ -6611,9 +9738,10 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct cfg80211_bitrate_mask *mask) { - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif; struct cfg80211_chan_def def; - struct ath12k *ar = arvif->ar; + struct ath12k *ar; enum nl80211_band band; const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; @@ -6625,8 +9753,15 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, int ret; int num_rates; - if (ath12k_mac_vif_chan(vif, &def)) - return -EPERM; + lockdep_assert_wiphy(hw->wiphy); + + arvif = &ahvif->deflink; + + ar = arvif->ar; + if (ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)) { + ret = -EPERM; + goto out; + } band = def.chan->band; ht_mcs_mask = mask->control[band].ht_mcs; @@ -6634,8 +9769,10 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC); sgi = mask->control[band].gi; - if (sgi == NL80211_TXRATE_FORCE_LGI) - return -EINVAL; + if (sgi == NL80211_TXRATE_FORCE_LGI) { + ret = -EINVAL; + goto out; + } /* mac80211 doesn't support sending a fixed HT/VHT MCS alone, rather it * requires passing at least one of used basic rates along with them. 
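In the bitrate-mask path above, "single legacy rate" means the legacy portion of the mask selects exactly one rate with no HT/VHT MCS set, which is what allows programming a fixed rate. The selection test is essentially a popcount-of-one check, roughly:

#include <stdint.h>

static int mask_is_single_rate(uint32_t legacy_mask)
{
	/* exactly one bit set: non-zero and a power of two */
	return legacy_mask && !(legacy_mask & (legacy_mask - 1));
}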
@@ -6651,11 +9788,11 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, if (ret) { ath12k_warn(ar->ab, "failed to get single legacy rate for vdev %i: %d\n", arvif->vdev_id, ret); - return ret; + goto out; } - ieee80211_iterate_stations_atomic(ar->hw, - ath12k_mac_disable_peer_fixed_rate, - arvif); + ieee80211_iterate_stations_mtx(hw, + ath12k_mac_disable_peer_fixed_rate, + arvif); } else if (ath12k_mac_bitrate_mask_get_single_nss(ar, band, mask, &single_nss)) { rate = WMI_FIXED_RATE_NONE; @@ -6699,30 +9836,23 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, return -EINVAL; } - ieee80211_iterate_stations_atomic(ar->hw, - ath12k_mac_disable_peer_fixed_rate, - arvif); - - mutex_lock(&ar->conf_mutex); + ieee80211_iterate_stations_mtx(hw, + ath12k_mac_disable_peer_fixed_rate, + arvif); arvif->bitrate_mask = *mask; - ieee80211_iterate_stations_atomic(ar->hw, - ath12k_mac_set_bitrate_mask_iter, - arvif); - - mutex_unlock(&ar->conf_mutex); + ieee80211_iterate_stations_mtx(hw, + ath12k_mac_set_bitrate_mask_iter, + arvif); } - mutex_lock(&ar->conf_mutex); - ret = ath12k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc); if (ret) { ath12k_warn(ar->ab, "failed to set fixed rate params on vdev %i: %d\n", arvif->vdev_id, ret); } - mutex_unlock(&ar->conf_mutex); - +out: return ret; } @@ -6730,26 +9860,38 @@ static void ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw, enum ieee80211_reconfig_type reconfig_type) { - struct ath12k *ar = hw->priv; - struct ath12k_base *ab = ar->ab; - struct ath12k_vif *arvif; - int recovery_count; + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k *ar; + struct ath12k_base *ab; + struct ath12k_vif *ahvif; + struct ath12k_link_vif *arvif; + int recovery_count, i; + + lockdep_assert_wiphy(hw->wiphy); if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART) return; - mutex_lock(&ar->conf_mutex); + guard(mutex)(&ah->hw_mutex); + + if (ah->state != ATH12K_HW_STATE_RESTARTED) + return; + + ah->state = ATH12K_HW_STATE_ON; + ieee80211_wake_queues(hw); + + for_each_ar(ah, ar, i) { + ab = ar->ab; - if (ar->state == ATH12K_STATE_RESTARTED) { ath12k_warn(ar->ab, "pdev %d successfully recovered\n", ar->pdev->pdev_id); - ar->state = ATH12K_STATE_ON; - ieee80211_wake_queues(ar->hw); if (ab->is_reset) { recovery_count = atomic_inc_return(&ab->recovery_count); + ath12k_dbg(ab, ATH12K_DBG_BOOT, "recovery count %d\n", recovery_count); + /* When there are multiple radios in an SOC, * the recovery has to be done for each radio */ @@ -6763,27 +9905,28 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw, } list_for_each_entry(arvif, &ar->arvifs, list) { + ahvif = arvif->ahvif; ath12k_dbg(ab, ATH12K_DBG_BOOT, "reconfig cipher %d up %d vdev type %d\n", - arvif->key_cipher, + ahvif->key_cipher, arvif->is_up, - arvif->vdev_type); + ahvif->vdev_type); + /* After trigger disconnect, then upper layer will * trigger connect again, then the PN number of * upper layer will be reset to keep up with AP - * side, hence PN number mis-match will not happened. + * side, hence PN number mismatch will not happen. 
*/ if (arvif->is_up && - arvif->vdev_type == WMI_VDEV_TYPE_STA && - arvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE) { - ieee80211_hw_restart_disconnect(arvif->vif); + ahvif->vdev_type == WMI_VDEV_TYPE_STA && + ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE) { + ieee80211_hw_restart_disconnect(ahvif->vif); + ath12k_dbg(ab, ATH12K_DBG_BOOT, "restart disconnect\n"); } } } - - mutex_unlock(&ar->conf_mutex); } static void @@ -6793,7 +9936,7 @@ ath12k_mac_update_bss_chan_survey(struct ath12k *ar, int ret; enum wmi_bss_chan_info_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); if (!test_bit(WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64, ar->ab->wmi_ab.svc_map) || ar->rx_channel != channel) @@ -6821,18 +9964,15 @@ ath12k_mac_update_bss_chan_survey(struct ath12k *ar, static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey) { - struct ath12k *ar = hw->priv; + struct ath12k *ar; struct ieee80211_supported_band *sband; struct survey_info *ar_survey; - int ret = 0; + + lockdep_assert_wiphy(hw->wiphy); if (idx >= ATH12K_NUM_CHANS) return -ENOENT; - ar_survey = &ar->survey[idx]; - - mutex_lock(&ar->conf_mutex); - sband = hw->wiphy->bands[NL80211_BAND_2GHZ]; if (sband && idx >= sband->n_channels) { idx -= sband->n_channels; @@ -6841,12 +9981,28 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx, if (!sband) sband = hw->wiphy->bands[NL80211_BAND_5GHZ]; + if (sband && idx >= sband->n_channels) { + idx -= sband->n_channels; + sband = NULL; + } - if (!sband || idx >= sband->n_channels) { - ret = -ENOENT; - goto exit; + if (!sband) + sband = hw->wiphy->bands[NL80211_BAND_6GHZ]; + + if (!sband || idx >= sband->n_channels) + return -ENOENT; + + ar = ath12k_mac_get_ar_by_chan(hw, &sband->channels[idx]); + if (!ar) { + if (sband->channels[idx].flags & IEEE80211_CHAN_DISABLED) { + memset(survey, 0, sizeof(*survey)); + return 0; + } + return -ENOENT; } + ar_survey = &ar->survey[idx]; + ath12k_mac_update_bss_chan_survey(ar, &sband->channels[idx]); spin_lock_bh(&ar->data_lock); @@ -6858,9 +10014,7 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx, if (ar->rx_channel == survey->channel) survey->filled |= SURVEY_INFO_IN_USE; -exit: - mutex_unlock(&ar->conf_mutex); - return ret; + return 0; } static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw, @@ -6868,7 +10022,12 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct station_info *sinfo) { - struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv; + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); + struct ath12k_link_sta *arsta; + + lockdep_assert_wiphy(hw->wiphy); + + arsta = &ahsta->deflink; sinfo->rx_duration = arsta->rx_duration; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION); @@ -6897,6 +10056,203 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw, sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); } +static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k *ar; + + ar = ath12k_ah_to_ar(ah, 0); + + lockdep_assert_wiphy(hw->wiphy); + + spin_lock_bh(&ar->data_lock); + ar->scan.roc_notify = false; + spin_unlock_bh(&ar->data_lock); + + ath12k_scan_abort(ar); + + cancel_delayed_work_sync(&ar->scan.timeout); + wiphy_work_cancel(hw->wiphy, &ar->scan.vdev_clean_wk); + + return 0; +} + +static int 
ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel *chan, + int duration, + enum ieee80211_roc_type type) +{ + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k_link_vif *arvif; + struct ath12k *ar; + u32 scan_time_msec; + bool create = true; + u8 link_id; + int ret; + + lockdep_assert_wiphy(hw->wiphy); + + ar = ath12k_mac_select_scan_device(hw, vif, chan->center_freq); + if (!ar) + return -EINVAL; + + /* Check if any link of the ML vif is already started on the + * radio (ar) corresponding to the given scan frequency and use it; + * if not, use deflink (link 0) for the scan. + */ + + link_id = ath12k_mac_find_link_id_by_ar(ahvif, ar); + arvif = ath12k_mac_assign_link_vif(ah, vif, link_id); + /* If the vif is already assigned to a specific vdev of an ar, + * check whether it is already started; a started vdev is not + * allowed to switch to a new radio. + * If the vdev is not started, but was earlier created on a + * different ar, delete that vdev and create a new one. We don't + * delete at scan stop as an optimization to avoid redundant + * delete-create vdev cycles for the same ar, in case the request + * is always on the same band for the vif. + */ + if (arvif->is_created) { + if (WARN_ON(!arvif->ar)) + return -EINVAL; + + if (ar != arvif->ar && arvif->is_started) + return -EBUSY; + + if (ar != arvif->ar) { + ath12k_mac_remove_link_interface(hw, arvif); + ath12k_mac_unassign_link_vif(arvif); + } else { + create = false; + } + } + + if (create) { + arvif = ath12k_mac_assign_link_vif(ah, vif, link_id); + + ret = ath12k_mac_vdev_create(ar, arvif); + if (ret) { + ath12k_warn(ar->ab, "unable to create scan vdev for roc: %d\n", + ret); + return ret; + } + } + + spin_lock_bh(&ar->data_lock); + + switch (ar->scan.state) { + case ATH12K_SCAN_IDLE: + reinit_completion(&ar->scan.started); + reinit_completion(&ar->scan.completed); + reinit_completion(&ar->scan.on_channel); + ar->scan.state = ATH12K_SCAN_STARTING; + ar->scan.is_roc = true; + ar->scan.arvif = arvif; + ar->scan.roc_freq = chan->center_freq; + ar->scan.roc_notify = true; + ret = 0; + break; + case ATH12K_SCAN_STARTING: + case ATH12K_SCAN_RUNNING: + case ATH12K_SCAN_ABORTING: + ret = -EBUSY; + break; + } + + spin_unlock_bh(&ar->data_lock); + + if (ret) + return ret; + + scan_time_msec = hw->wiphy->max_remain_on_channel_duration * 2; + + struct ath12k_wmi_scan_req_arg *arg __free(kfree) = + kzalloc(sizeof(*arg), GFP_KERNEL); + if (!arg) + return -ENOMEM; + + ath12k_wmi_start_scan_init(ar, arg); + arg->num_chan = 1; + + u32 *chan_list __free(kfree) = kcalloc(arg->num_chan, sizeof(*chan_list), + GFP_KERNEL); + if (!chan_list) + return -ENOMEM; + + arg->chan_list = chan_list; + arg->vdev_id = arvif->vdev_id; + arg->scan_id = ATH12K_SCAN_ID; + arg->chan_list[0] = chan->center_freq; + arg->dwell_time_active = scan_time_msec; + arg->dwell_time_passive = scan_time_msec; + arg->max_scan_time = scan_time_msec; + arg->scan_f_passive = 1; + arg->burst_duration = duration; + + ret = ath12k_start_scan(ar, arg); + if (ret) { + ath12k_warn(ar->ab, "failed to start roc scan: %d\n", ret); + + spin_lock_bh(&ar->data_lock); + ar->scan.state = ATH12K_SCAN_IDLE; + spin_unlock_bh(&ar->data_lock); + return ret; + } + + ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ); + if (ret == 0) { + ath12k_warn(ar->ab, "failed to switch to channel for roc scan\n"); + ret = ath12k_scan_stop(ar); + if (ret) + ath12k_warn(ar->ab, 
"failed to stop scan: %d\n", ret); + return -ETIMEDOUT; + } + + ieee80211_queue_delayed_work(hw, &ar->scan.timeout, + msecs_to_jiffies(duration)); + + return 0; +} + +static void ath12k_mac_op_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data) +{ + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_rekey_data *rekey_data; + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k *ar = ath12k_ah_to_ar(ah, 0); + struct ath12k_link_vif *arvif; + + lockdep_assert_wiphy(hw->wiphy); + + arvif = &ahvif->deflink; + rekey_data = &arvif->rekey_data; + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac set rekey data vdev %d\n", + arvif->vdev_id); + + memcpy(rekey_data->kck, data->kck, NL80211_KCK_LEN); + memcpy(rekey_data->kek, data->kek, NL80211_KEK_LEN); + + /* The supplicant works on big-endian, the firmware expects it on + * little endian. + */ + rekey_data->replay_ctr = get_unaligned_be64(data->replay_ctr); + + arvif->rekey_data.enable_offload = true; + + ath12k_dbg_dump(ar->ab, ATH12K_DBG_MAC, "kck", NULL, + rekey_data->kck, NL80211_KCK_LEN); + ath12k_dbg_dump(ar->ab, ATH12K_DBG_MAC, "kek", NULL, + rekey_data->kck, NL80211_KEK_LEN); + ath12k_dbg_dump(ar->ab, ATH12K_DBG_MAC, "replay ctr", NULL, + &rekey_data->replay_ctr, sizeof(rekey_data->replay_ctr)); +} + static const struct ieee80211_ops ath12k_ops = { .tx = ath12k_mac_op_tx, .wake_tx_queue = ieee80211_handle_wake_tx_queue, @@ -6907,14 +10263,17 @@ static const struct ieee80211_ops ath12k_ops = { .remove_interface = ath12k_mac_op_remove_interface, .update_vif_offload = ath12k_mac_op_update_vif_offload, .config = ath12k_mac_op_config, - .bss_info_changed = ath12k_mac_op_bss_info_changed, + .link_info_changed = ath12k_mac_op_link_info_changed, + .vif_cfg_changed = ath12k_mac_op_vif_cfg_changed, + .change_vif_links = ath12k_mac_op_change_vif_links, .configure_filter = ath12k_mac_op_configure_filter, .hw_scan = ath12k_mac_op_hw_scan, .cancel_hw_scan = ath12k_mac_op_cancel_hw_scan, .set_key = ath12k_mac_op_set_key, + .set_rekey_data = ath12k_mac_op_set_rekey_data, .sta_state = ath12k_mac_op_sta_state, .sta_set_txpwr = ath12k_mac_op_sta_set_txpwr, - .sta_rc_update = ath12k_mac_op_sta_rc_update, + .link_sta_rc_update = ath12k_mac_op_link_sta_rc_update, .conf_tx = ath12k_mac_op_conf_tx, .set_antenna = ath12k_mac_op_set_antenna, .get_antenna = ath12k_mac_op_get_antenna, @@ -6931,6 +10290,15 @@ static const struct ieee80211_ops ath12k_ops = { .get_survey = ath12k_mac_op_get_survey, .flush = ath12k_mac_op_flush, .sta_statistics = ath12k_mac_op_sta_statistics, + .remain_on_channel = ath12k_mac_op_remain_on_channel, + .cancel_remain_on_channel = ath12k_mac_op_cancel_remain_on_channel, + .change_sta_links = ath12k_mac_op_change_sta_links, + .can_activate_links = ath12k_mac_op_can_activate_links, +#ifdef CONFIG_PM + .suspend = ath12k_wow_op_suspend, + .resume = ath12k_wow_op_resume, + .set_wakeup = ath12k_wow_op_set_wakeup, +#endif }; static void ath12k_mac_update_ch_list(struct ath12k *ar, @@ -6947,6 +10315,9 @@ static void ath12k_mac_update_ch_list(struct ath12k *ar, band->channels[i].center_freq > freq_high) band->channels[i].flags |= IEEE80211_CHAN_DISABLED; } + + ar->freq_range.start_freq = MHZ_TO_KHZ(freq_low); + ar->freq_range.end_freq = MHZ_TO_KHZ(freq_high); } static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band) @@ -6966,10 +10337,12 @@ static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band) } static int ath12k_mac_setup_channels_rates(struct ath12k *ar, - u32 
supported_bands) + u32 supported_bands, + struct ieee80211_supported_band *bands[]) { struct ieee80211_supported_band *band; struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap; + struct ath12k_hw *ah = ar->ah; void *channels; u32 phy_id; @@ -6993,7 +10366,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar, band->channels = channels; band->n_bitrates = ath12k_g_rates_size; band->bitrates = ath12k_g_rates; - ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band; + bands[NL80211_BAND_2GHZ] = band; if (ar->ab->hw_params->single_pdev_only) { phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP); @@ -7005,7 +10378,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar, } if (supported_bands & WMI_HOST_WLAN_5G_CAP) { - if (reg_cap->high_5ghz_chan >= ATH12K_MAX_6G_FREQ) { + if (reg_cap->high_5ghz_chan >= ATH12K_MIN_6G_FREQ) { channels = kmemdup(ath12k_6ghz_channels, sizeof(ath12k_6ghz_channels), GFP_KERNEL); if (!channels) { @@ -7020,10 +10393,11 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar, band->channels = channels; band->n_bitrates = ath12k_a_rates_size; band->bitrates = ath12k_a_rates; - ar->hw->wiphy->bands[NL80211_BAND_6GHZ] = band; + bands[NL80211_BAND_6GHZ] = band; ath12k_mac_update_ch_list(ar, band, reg_cap->low_5ghz_chan, reg_cap->high_5ghz_chan); + ah->use_6ghz_regd = true; } if (reg_cap->low_5ghz_chan < ATH12K_MIN_6G_FREQ) { @@ -7042,7 +10416,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar, band->channels = channels; band->n_bitrates = ath12k_a_rates_size; band->bitrates = ath12k_a_rates; - ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band; + bands[NL80211_BAND_5GHZ] = band; if (ar->ab->hw_params->single_pdev_only) { phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP); @@ -7058,83 +10432,290 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar, return 0; } -static int ath12k_mac_setup_iface_combinations(struct ath12k *ar) +static u16 ath12k_mac_get_ifmodes(struct ath12k_hw *ah) { - struct ath12k_base *ab = ar->ab; - struct ieee80211_iface_combination *combinations; + struct ath12k *ar; + int i; + u16 interface_modes = U16_MAX; + + for_each_ar(ah, ar, i) + interface_modes &= ar->ab->hw_params->interface_modes; + + return interface_modes == U16_MAX ? 
0 : interface_modes; +} + +static bool ath12k_mac_is_iface_mode_enable(struct ath12k_hw *ah, + enum nl80211_iftype type) +{ + struct ath12k *ar; + int i; + u16 interface_modes, mode = 0; + bool is_enable = false; + + if (type == NL80211_IFTYPE_MESH_POINT) { + if (IS_ENABLED(CONFIG_MAC80211_MESH)) + mode = BIT(type); + } else { + mode = BIT(type); + } + + for_each_ar(ah, ar, i) { + interface_modes = ar->ab->hw_params->interface_modes; + if (interface_modes & mode) { + is_enable = true; + break; + } + } + + return is_enable; +} + +static int +ath12k_mac_setup_radio_iface_comb(struct ath12k *ar, + struct ieee80211_iface_combination *comb) +{ + u16 interface_modes = ar->ab->hw_params->interface_modes; struct ieee80211_iface_limit *limits; int n_limits, max_interfaces; - bool ap, mesh; + bool ap, mesh, p2p; - ap = ab->hw_params->interface_modes & BIT(NL80211_IFTYPE_AP); + ap = interface_modes & BIT(NL80211_IFTYPE_AP); + p2p = interface_modes & BIT(NL80211_IFTYPE_P2P_DEVICE); mesh = IS_ENABLED(CONFIG_MAC80211_MESH) && - ab->hw_params->interface_modes & BIT(NL80211_IFTYPE_MESH_POINT); + (interface_modes & BIT(NL80211_IFTYPE_MESH_POINT)); - combinations = kzalloc(sizeof(*combinations), GFP_KERNEL); - if (!combinations) - return -ENOMEM; - - if (ap || mesh) { + if ((ap || mesh) && !p2p) { n_limits = 2; max_interfaces = 16; + } else if (p2p) { + n_limits = 3; + if (ap || mesh) + max_interfaces = 16; + else + max_interfaces = 3; } else { n_limits = 1; max_interfaces = 1; } limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL); - if (!limits) { - kfree(combinations); + if (!limits) return -ENOMEM; - } limits[0].max = 1; limits[0].types |= BIT(NL80211_IFTYPE_STATION); - if (ap) { + if (ap || mesh || p2p) limits[1].max = max_interfaces; + + if (ap) limits[1].types |= BIT(NL80211_IFTYPE_AP); - } if (mesh) limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT); - combinations[0].limits = limits; - combinations[0].n_limits = n_limits; - combinations[0].max_interfaces = max_interfaces; - combinations[0].num_different_channels = 1; - combinations[0].beacon_int_infra_match = true; - combinations[0].beacon_int_min_gcd = 100; - combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | - BIT(NL80211_CHAN_WIDTH_20) | - BIT(NL80211_CHAN_WIDTH_40) | - BIT(NL80211_CHAN_WIDTH_80); + if (p2p) { + limits[1].types |= BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO); + limits[2].max = 1; + limits[2].types |= BIT(NL80211_IFTYPE_P2P_DEVICE); + } - ar->hw->wiphy->iface_combinations = combinations; - ar->hw->wiphy->n_iface_combinations = 1; + comb[0].limits = limits; + comb[0].n_limits = n_limits; + comb[0].max_interfaces = max_interfaces; + comb[0].num_different_channels = 1; + comb[0].beacon_int_infra_match = true; + comb[0].beacon_int_min_gcd = 100; + comb[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80); return 0; } +static int +ath12k_mac_setup_global_iface_comb(struct ath12k_hw *ah, + struct wiphy_radio *radio, + u8 n_radio, + struct ieee80211_iface_combination *comb) +{ + const struct ieee80211_iface_combination *iter_comb; + struct ieee80211_iface_limit *limits; + int i, j, n_limits; + bool ap, mesh, p2p; + + if (!n_radio) + return 0; + + ap = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_AP); + p2p = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_P2P_DEVICE); + mesh = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_MESH_POINT); + + if ((ap || mesh) && !p2p) + n_limits = 2; + else 
if (p2p) + n_limits = 3; + else + n_limits = 1; + + limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL); + if (!limits) + return -ENOMEM; + + for (i = 0; i < n_radio; i++) { + iter_comb = radio[i].iface_combinations; + for (j = 0; j < iter_comb->n_limits && j < n_limits; j++) { + limits[j].types |= iter_comb->limits[j].types; + limits[j].max += iter_comb->limits[j].max; + } + + comb->max_interfaces += iter_comb->max_interfaces; + comb->num_different_channels += iter_comb->num_different_channels; + comb->radar_detect_widths |= iter_comb->radar_detect_widths; + } + + comb->limits = limits; + comb->n_limits = n_limits; + comb->beacon_int_infra_match = true; + comb->beacon_int_min_gcd = 100; + + return 0; +} + +static +void ath12k_mac_cleanup_iface_comb(const struct ieee80211_iface_combination *iface_comb) +{ + kfree(iface_comb[0].limits); + kfree(iface_comb); +} + +static void ath12k_mac_cleanup_iface_combinations(struct ath12k_hw *ah) +{ + struct wiphy *wiphy = ah->hw->wiphy; + const struct wiphy_radio *radio; + int i; + + if (wiphy->n_radio > 0) { + radio = wiphy->radio; + for (i = 0; i < wiphy->n_radio; i++) + ath12k_mac_cleanup_iface_comb(radio[i].iface_combinations); + + kfree(wiphy->radio); + } + + ath12k_mac_cleanup_iface_comb(wiphy->iface_combinations); +} + +static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah) +{ + struct ieee80211_iface_combination *combinations, *comb; + struct wiphy *wiphy = ah->hw->wiphy; + struct wiphy_radio *radio; + struct ath12k *ar; + int i, ret; + + combinations = kzalloc(sizeof(*combinations), GFP_KERNEL); + if (!combinations) + return -ENOMEM; + + if (ah->num_radio == 1) { + ret = ath12k_mac_setup_radio_iface_comb(&ah->radio[0], + combinations); + if (ret) { + ath12k_hw_warn(ah, "failed to setup radio interface combinations for one radio: %d", + ret); + goto err_free_combinations; + } + + goto out; + } + + /* there are multiple radios */ + + radio = kcalloc(ah->num_radio, sizeof(*radio), GFP_KERNEL); + if (!radio) { + ret = -ENOMEM; + goto err_free_combinations; + } + + for_each_ar(ah, ar, i) { + comb = kzalloc(sizeof(*comb), GFP_KERNEL); + if (!comb) { + ret = -ENOMEM; + goto err_free_radios; + } + + ret = ath12k_mac_setup_radio_iface_comb(ar, comb); + if (ret) { + ath12k_hw_warn(ah, "failed to setup radio interface combinations for radio %d: %d", + i, ret); + kfree(comb); + goto err_free_radios; + } + + radio[i].freq_range = &ar->freq_range; + radio[i].n_freq_range = 1; + + radio[i].iface_combinations = comb; + radio[i].n_iface_combinations = 1; + } + + ret = ath12k_mac_setup_global_iface_comb(ah, radio, ah->num_radio, combinations); + if (ret) { + ath12k_hw_warn(ah, "failed to setup global interface combinations: %d", + ret); + goto err_free_all_radios; + } + + wiphy->radio = radio; + wiphy->n_radio = ah->num_radio; + +out: + wiphy->iface_combinations = combinations; + wiphy->n_iface_combinations = 1; + + return 0; + +err_free_all_radios: + i = ah->num_radio; + +err_free_radios: + while (i--) + ath12k_mac_cleanup_iface_comb(radio[i].iface_combinations); + + kfree(radio); + +err_free_combinations: + kfree(combinations); + + return ret; +} + static const u8 ath12k_if_types_ext_capa[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, + [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, }; static const u8 ath12k_if_types_ext_capa_sta[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, + [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT, }; 
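The index/value pairs in these capability arrays follow the IEEE 802.11 Extended Capabilities element layout: capability bit N is carried in octet N / 8 under the mask 1 << (N % 8), which is how a flag such as multi-BSSID support ends up at array index [2]. Below is a minimal, self-contained C sketch of that mapping; the bit number used is an illustrative assumption for this sketch, not a value taken from the patch:

#include <stdio.h>

/* Map an IEEE 802.11 extended-capability bit number to the byte index
 * and bit mask used in arrays like ath12k_if_types_ext_capa above.
 */
static void ext_capa_slot(unsigned int bit, unsigned int *octet,
			  unsigned char *mask)
{
	*octet = bit / 8;                        /* array index, e.g. [2] */
	*mask = (unsigned char)(1u << (bit % 8)); /* value ORed into that octet */
}

int main(void)
{
	unsigned int octet;
	unsigned char mask;

	/* Multiple BSSID support is commonly cited as extended-capability
	 * bit 22 (assumption for illustration): this prints octet 2,
	 * mask 0x40, i.e. an entry written as [2] = BIT(6).
	 */
	ext_capa_slot(22, &octet, &mask);
	printf("octet %u mask 0x%02x\n", octet, mask);

	return 0;
}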
static const u8 ath12k_if_types_ext_capa_ap[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, + [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, [9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT, + [10] = WLAN_EXT_CAPA11_EMA_SUPPORT, }; -static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = { +static struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = { { .extended_capabilities = ath12k_if_types_ext_capa, .extended_capabilities_mask = ath12k_if_types_ext_capa, @@ -7151,48 +10732,78 @@ static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = { .extended_capabilities_mask = ath12k_if_types_ext_capa_ap, .extended_capabilities_len = sizeof(ath12k_if_types_ext_capa_ap), + .eml_capabilities = 0, + .mld_capa_and_ops = 0, }, }; -static void __ath12k_mac_unregister(struct ath12k *ar) +static void ath12k_mac_cleanup_unregister(struct ath12k *ar) { - cancel_work_sync(&ar->regd_update_work); - - ieee80211_unregister_hw(ar->hw); - idr_for_each(&ar->txmgmt_idr, ath12k_mac_tx_mgmt_pending_free, ar); idr_destroy(&ar->txmgmt_idr); kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels); kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels); - - kfree(ar->hw->wiphy->iface_combinations[0].limits); - kfree(ar->hw->wiphy->iface_combinations); - - SET_IEEE80211_DEV(ar->hw, NULL); } -void ath12k_mac_unregister(struct ath12k_base *ab) +static void ath12k_mac_hw_unregister(struct ath12k_hw *ah) { + struct ieee80211_hw *hw = ah->hw; struct ath12k *ar; - struct ath12k_pdev *pdev; int i; - for (i = 0; i < ab->num_radios; i++) { - pdev = &ab->pdevs[i]; - ar = pdev->ar; - if (!ar) - continue; - - __ath12k_mac_unregister(ar); + for_each_ar(ah, ar, i) { + cancel_work_sync(&ar->regd_update_work); + ath12k_debugfs_unregister(ar); } + + ieee80211_unregister_hw(hw); + + for_each_ar(ah, ar, i) + ath12k_mac_cleanup_unregister(ar); + + ath12k_mac_cleanup_iface_combinations(ah); + + SET_IEEE80211_DEV(hw, NULL); } -static int __ath12k_mac_register(struct ath12k *ar) +static int ath12k_mac_setup_register(struct ath12k *ar, + u32 *ht_cap, + struct ieee80211_supported_band *bands[]) { - struct ath12k_base *ab = ar->ab; struct ath12k_pdev_cap *cap = &ar->pdev->cap; + int ret; + + init_waitqueue_head(&ar->txmgmt_empty_waitq); + idr_init(&ar->txmgmt_idr); + spin_lock_init(&ar->txmgmt_idr_lock); + + ath12k_pdev_caps_update(ar); + + ret = ath12k_mac_setup_channels_rates(ar, + cap->supported_bands, + bands); + if (ret) + return ret; + + ath12k_mac_setup_ht_vht_cap(ar, cap, ht_cap); + ath12k_mac_setup_sband_iftype_data(ar, cap); + + ar->max_num_stations = ath12k_core_get_max_station_per_radio(ar->ab); + ar->max_num_peers = ath12k_core_get_max_peers_per_radio(ar->ab); + + return 0; +} + +static int ath12k_mac_hw_register(struct ath12k_hw *ah) +{ + struct ieee80211_hw *hw = ah->hw; + struct wiphy *wiphy = hw->wiphy; + struct ath12k *ar = ath12k_ah_to_ar(ah, 0); + struct ath12k_base *ab = ar->ab; + struct ath12k_pdev *pdev; + struct ath12k_pdev_cap *cap; static const u32 cipher_suites[] = { WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, @@ -7204,286 +10815,688 @@ static int __ath12k_mac_register(struct ath12k *ar) WLAN_CIPHER_SUITE_GCMP_256, WLAN_CIPHER_SUITE_CCMP_256, }; - int ret; - u32 ht_cap = 0; + int ret, i, j; + u32 ht_cap = U32_MAX, antennas_rx = 0, antennas_tx = 0; + bool is_6ghz = false, is_raw_mode = false, is_monitor_disable = false; + u8 *mac_addr = NULL; + u8 mbssid_max_interfaces = 0; - ath12k_pdev_caps_update(ar); + 
wiphy->max_ap_assoc_sta = 0; - SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr); + for_each_ar(ah, ar, i) { + u32 ht_cap_info = 0; - SET_IEEE80211_DEV(ar->hw, ab->dev); + pdev = ar->pdev; + if (ar->ab->pdevs_macaddr_valid) { + ether_addr_copy(ar->mac_addr, pdev->mac_addr); + } else { + ether_addr_copy(ar->mac_addr, ar->ab->mac_addr); + ar->mac_addr[4] += ar->pdev_idx; + } - ret = ath12k_mac_setup_channels_rates(ar, - cap->supported_bands); - if (ret) - goto err; + ret = ath12k_mac_setup_register(ar, &ht_cap_info, hw->wiphy->bands); + if (ret) + goto err_cleanup_unregister; - ath12k_mac_setup_ht_vht_cap(ar, cap, &ht_cap); - ath12k_mac_setup_sband_iftype_data(ar, cap); + /* 6 GHz does not support HT Cap, hence do not consider it */ + if (!ar->supports_6ghz) + ht_cap &= ht_cap_info; - ret = ath12k_mac_setup_iface_combinations(ar); - if (ret) { - ath12k_err(ar->ab, "failed to setup interface combinations: %d\n", ret); - goto err_free_channels; - } + wiphy->max_ap_assoc_sta += ar->max_num_stations; - ar->hw->wiphy->available_antennas_rx = cap->rx_chain_mask; - ar->hw->wiphy->available_antennas_tx = cap->tx_chain_mask; + /* Advertise the max antenna support of all radios, driver can handle + * per pdev specific antenna setting based on pdev cap when antenna + * changes are made + */ + cap = &pdev->cap; + + antennas_rx = max_t(u32, antennas_rx, cap->rx_chain_mask); + antennas_tx = max_t(u32, antennas_tx, cap->tx_chain_mask); + + if (ar->supports_6ghz) + is_6ghz = true; - ar->hw->wiphy->interface_modes = ab->hw_params->interface_modes; + if (test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags)) + is_raw_mode = true; - ieee80211_hw_set(ar->hw, SIGNAL_DBM); - ieee80211_hw_set(ar->hw, SUPPORTS_PS); - ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS); - ieee80211_hw_set(ar->hw, MFP_CAPABLE); - ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS); - ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL); - ieee80211_hw_set(ar->hw, AP_LINK_PS); - ieee80211_hw_set(ar->hw, SPECTRUM_MGMT); - ieee80211_hw_set(ar->hw, CONNECTION_MONITOR); - ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK); - ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA); - ieee80211_hw_set(ar->hw, QUEUE_CONTROL); - ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG); - ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK); + if (!ar->ab->hw_params->supports_monitor) + is_monitor_disable = true; + + if (i == 0) + mac_addr = ar->mac_addr; + else + mac_addr = ab->mac_addr; - if (ht_cap & WMI_HT_CAP_ENABLED) { - ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION); - ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW); - ieee80211_hw_set(ar->hw, SUPPORTS_REORDERING_BUFFER); - ieee80211_hw_set(ar->hw, SUPPORTS_AMSDU_IN_AMPDU); - ieee80211_hw_set(ar->hw, USES_RSS); + mbssid_max_interfaces += TARGET_NUM_VDEVS; } - ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS; - ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; + wiphy->available_antennas_rx = antennas_rx; + wiphy->available_antennas_tx = antennas_tx; + + SET_IEEE80211_PERM_ADDR(hw, mac_addr); + SET_IEEE80211_DEV(hw, ab->dev); + + ret = ath12k_mac_setup_iface_combinations(ah); + if (ret) { + ath12k_err(ab, "failed to setup interface combinations: %d\n", ret); + goto err_complete_cleanup_unregister; + } + + wiphy->interface_modes = ath12k_mac_get_ifmodes(ah); + + if (ah->num_radio == 1 && + wiphy->bands[NL80211_BAND_2GHZ] && + wiphy->bands[NL80211_BAND_5GHZ] && + wiphy->bands[NL80211_BAND_6GHZ]) + ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); + + ieee80211_hw_set(hw, SIGNAL_DBM); + ieee80211_hw_set(hw, SUPPORTS_PS); + ieee80211_hw_set(hw, 
SUPPORTS_DYNAMIC_PS); + ieee80211_hw_set(hw, MFP_CAPABLE); + ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); + ieee80211_hw_set(hw, HAS_RATE_CONTROL); + ieee80211_hw_set(hw, AP_LINK_PS); + ieee80211_hw_set(hw, SPECTRUM_MGMT); + ieee80211_hw_set(hw, CONNECTION_MONITOR); + ieee80211_hw_set(hw, SUPPORTS_PER_STA_GTK); + ieee80211_hw_set(hw, CHANCTX_STA_CSA); + ieee80211_hw_set(hw, QUEUE_CONTROL); + ieee80211_hw_set(hw, SUPPORTS_TX_FRAG); + ieee80211_hw_set(hw, REPORTS_LOW_ACK); + + if ((ht_cap & WMI_HT_CAP_ENABLED) || is_6ghz) { + ieee80211_hw_set(hw, AMPDU_AGGREGATION); + ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW); + ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); + ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); + ieee80211_hw_set(hw, USES_RSS); + } + + wiphy->features |= NL80211_FEATURE_STATIC_SMPS; + wiphy->flags |= WIPHY_FLAG_IBSS_RSN; /* TODO: Check if HT capability advertised from firmware is different * for each band for a dual band capable radio. It will be tricky to * handle it when the ht capability different for each band. */ - if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS) - ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS; + if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS || + (is_6ghz && ab->hw_params->supports_dynamic_smps_6ghz)) + wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS; - ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID; - ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN; + wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID; + wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN; - ar->hw->max_listen_interval = ATH12K_MAX_HW_LISTEN_INTERVAL; + hw->max_listen_interval = ATH12K_MAX_HW_LISTEN_INTERVAL; - ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; - ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; - ar->hw->wiphy->max_remain_on_channel_duration = 5000; + wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; + wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; + wiphy->max_remain_on_channel_duration = 5000; - ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; - ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | + wiphy->flags |= WIPHY_FLAG_AP_UAPSD; + wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | NL80211_FEATURE_AP_SCAN; - ar->max_num_stations = TARGET_NUM_STATIONS; - ar->max_num_peers = TARGET_NUM_PEERS_PDEV; + /* MLO is not yet supported so disable Wireless Extensions for now + * to make sure ath12k users don't use it. This flag can be removed + * once WIPHY_FLAG_SUPPORTS_MLO is enabled. + */ + wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT; + + /* Copy over MLO related capabilities received from + * WMI_SERVICE_READY_EXT2_EVENT if single_chip_mlo_supp is set. 
+ */ + if (ab->ag->mlo_capable) { + ath12k_iftypes_ext_capa[2].eml_capabilities = cap->eml_cap; + ath12k_iftypes_ext_capa[2].mld_capa_and_ops = cap->mld_cap; + wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO; + } - ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations; + hw->queues = ATH12K_HW_MAX_QUEUES; + wiphy->tx_queue_len = ATH12K_QUEUE_LEN; + hw->offchannel_tx_hw_queue = ATH12K_HW_MAX_QUEUES - 1; + hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_EHT; - ar->hw->queues = ATH12K_HW_MAX_QUEUES; - ar->hw->wiphy->tx_queue_len = ATH12K_QUEUE_LEN; - ar->hw->offchannel_tx_hw_queue = ATH12K_HW_MAX_QUEUES - 1; - ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; + hw->vif_data_size = sizeof(struct ath12k_vif); + hw->sta_data_size = sizeof(struct ath12k_sta); + hw->extra_tx_headroom = ab->hw_params->iova_mask; - ar->hw->vif_data_size = sizeof(struct ath12k_vif); - ar->hw->sta_data_size = sizeof(struct ath12k_sta); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_STA_TX_PWR); - wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); - wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR); + wiphy->cipher_suites = cipher_suites; + wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); - ar->hw->wiphy->cipher_suites = cipher_suites; - ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); + wiphy->iftype_ext_capab = ath12k_iftypes_ext_capa; + wiphy->num_iftype_ext_capab = ARRAY_SIZE(ath12k_iftypes_ext_capa); - ar->hw->wiphy->iftype_ext_capab = ath12k_iftypes_ext_capa; - ar->hw->wiphy->num_iftype_ext_capab = - ARRAY_SIZE(ath12k_iftypes_ext_capa); + wiphy->mbssid_max_interfaces = mbssid_max_interfaces; + wiphy->ema_max_profile_periodicity = TARGET_EMA_MAX_PROFILE_PERIOD; - if (ar->supports_6ghz) { - wiphy_ext_feature_set(ar->hw->wiphy, + if (is_6ghz) { + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_DISCOVERY); - wiphy_ext_feature_set(ar->hw->wiphy, + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP); } - wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_PUNCT); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_PUNCT); + + ath12k_reg_init(hw); + + if (!is_raw_mode) { + hw->netdev_features = NETIF_F_HW_CSUM; + ieee80211_hw_set(hw, SW_CRYPTO_CONTROL); + ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); + } - ath12k_reg_init(ar); + if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) { + wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS; + wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS; + wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH; + wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS; + wiphy->max_sched_scan_plan_interval = + WMI_PNO_MAX_SCHED_SCAN_PLAN_INT; + wiphy->max_sched_scan_plan_iterations = + WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS; + wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR; + } - if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) { - ar->hw->netdev_features = NETIF_F_HW_CSUM; - ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL); - ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT); + ret = ath12k_wow_init(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to init wow: %d\n", ret); + goto err_cleanup_if_combs; } - ret = ieee80211_register_hw(ar->hw); + ret = ieee80211_register_hw(hw); if (ret) { - ath12k_err(ar->ab, "ieee80211 registration failed: %d\n", ret); - goto err_free_if_combs; + ath12k_err(ab, "ieee80211 registration failed: %d\n", ret); + goto err_cleanup_if_combs; } - if (!ab->hw_params->supports_monitor) 
+	if (is_monitor_disable)
 		/* There's a race between calling ieee80211_register_hw()
 		 * and here where the monitor mode is enabled for a little
 		 * while. But that time is so short that in practice it makes
 		 * no difference in real life.
 		 */
-		ar->hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
+		wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
 
-	/* Apply the regd received during initialization */
-	ret = ath12k_regd_update(ar, true);
-	if (ret) {
-		ath12k_err(ar->ab, "ath12k regd update failed: %d\n", ret);
-		goto err_unregister_hw;
+	for_each_ar(ah, ar, i) {
+		/* Apply the regd received during initialization */
+		ret = ath12k_regd_update(ar, true);
+		if (ret) {
+			ath12k_err(ar->ab, "ath12k regd update failed: %d\n", ret);
+			goto err_unregister_hw;
+		}
+
+		ath12k_debugfs_register(ar);
 	}
 
 	return 0;
 
 err_unregister_hw:
-	ieee80211_unregister_hw(ar->hw);
+	for_each_ar(ah, ar, i)
+		ath12k_debugfs_unregister(ar);
 
-err_free_if_combs:
-	kfree(ar->hw->wiphy->iface_combinations[0].limits);
-	kfree(ar->hw->wiphy->iface_combinations);
+	ieee80211_unregister_hw(hw);
 
-err_free_channels:
-	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
-	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
-	kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+err_cleanup_if_combs:
+	ath12k_mac_cleanup_iface_combinations(ah);
+
+err_complete_cleanup_unregister:
+	i = ah->num_radio;
+
+err_cleanup_unregister:
+	for (j = 0; j < i; j++) {
+		ar = ath12k_ah_to_ar(ah, j);
+		ath12k_mac_cleanup_unregister(ar);
+	}
+
+	SET_IEEE80211_DEV(hw, NULL);
 
-err:
-	SET_IEEE80211_DEV(ar->hw, NULL);
 	return ret;
 }
 
-int ath12k_mac_register(struct ath12k_base *ab)
+static void ath12k_mac_setup(struct ath12k *ar)
 {
-	struct ath12k *ar;
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_pdev *pdev = ar->pdev;
+	u8 pdev_idx = ar->pdev_idx;
+
+	ar->lmac_id = ath12k_hw_get_mac_from_pdev_id(ab->hw_params, pdev_idx);
+
+	ar->wmi = &ab->wmi_ab.wmi[pdev_idx];
+	/* FIXME: wmi[0] is already initialized during attach,
+	 * Should we do this again?
+ */ + ath12k_wmi_pdev_attach(ab, pdev_idx); + + ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask; + ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask; + ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask); + ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask); + ar->scan.arvif = NULL; + + spin_lock_init(&ar->data_lock); + INIT_LIST_HEAD(&ar->arvifs); + INIT_LIST_HEAD(&ar->ppdu_stats_info); + + init_completion(&ar->vdev_setup_done); + init_completion(&ar->vdev_delete_done); + init_completion(&ar->peer_assoc_done); + init_completion(&ar->peer_delete_done); + init_completion(&ar->install_key_done); + init_completion(&ar->bss_survey_done); + init_completion(&ar->scan.started); + init_completion(&ar->scan.completed); + init_completion(&ar->scan.on_channel); + init_completion(&ar->mlo_setup_done); + + INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work); + wiphy_work_init(&ar->scan.vdev_clean_wk, ath12k_scan_vdev_clean_work); + INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work); + + wiphy_work_init(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work); + skb_queue_head_init(&ar->wmi_mgmt_tx_queue); +} + +static int __ath12k_mac_mlo_setup(struct ath12k *ar) +{ + u8 num_link = 0, partner_link_id[ATH12K_GROUP_MAX_RADIO] = {}; + struct ath12k_base *partner_ab, *ab = ar->ab; + struct ath12k_hw_group *ag = ab->ag; + struct wmi_mlo_setup_arg mlo = {}; struct ath12k_pdev *pdev; - int i; + unsigned long time_left; + int i, j, ret; + + lockdep_assert_held(&ag->mutex); + + reinit_completion(&ar->mlo_setup_done); + + for (i = 0; i < ag->num_devices; i++) { + partner_ab = ag->ab[i]; + + for (j = 0; j < partner_ab->num_radios; j++) { + pdev = &partner_ab->pdevs[j]; + + /* Avoid the self link */ + if (ar == pdev->ar) + continue; + + partner_link_id[num_link] = pdev->hw_link_id; + num_link++; + + ath12k_dbg(ab, ATH12K_DBG_MAC, "device %d pdev %d hw_link_id %d num_link %d\n", + i, j, pdev->hw_link_id, num_link); + } + } + + mlo.group_id = cpu_to_le32(ag->id); + mlo.partner_link_id = partner_link_id; + mlo.num_partner_links = num_link; + ar->mlo_setup_status = 0; + + ath12k_dbg(ab, ATH12K_DBG_MAC, "group id %d num_link %d\n", ag->id, num_link); + + ret = ath12k_wmi_mlo_setup(ar, &mlo); + if (ret) { + ath12k_err(ab, "failed to send setup MLO WMI command for pdev %d: %d\n", + ar->pdev_idx, ret); + return ret; + } + + time_left = wait_for_completion_timeout(&ar->mlo_setup_done, + WMI_MLO_CMD_TIMEOUT_HZ); + + if (!time_left || ar->mlo_setup_status) + return ar->mlo_setup_status ? 
: -ETIMEDOUT; + + ath12k_dbg(ab, ATH12K_DBG_MAC, "mlo setup done for pdev %d\n", ar->pdev_idx); + + return 0; +} + +static int __ath12k_mac_mlo_teardown(struct ath12k *ar) +{ + struct ath12k_base *ab = ar->ab; int ret; - if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) + if (test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags)) return 0; - for (i = 0; i < ab->num_radios; i++) { - pdev = &ab->pdevs[i]; - ar = pdev->ar; - if (ab->pdevs_macaddr_valid) { - ether_addr_copy(ar->mac_addr, pdev->mac_addr); - } else { - ether_addr_copy(ar->mac_addr, ab->mac_addr); - ar->mac_addr[4] += i; + ret = ath12k_wmi_mlo_teardown(ar); + if (ret) { + ath12k_warn(ab, "failed to send MLO teardown WMI command for pdev %d: %d\n", + ar->pdev_idx, ret); + return ret; + } + + ath12k_dbg(ab, ATH12K_DBG_MAC, "mlo teardown for pdev %d\n", ar->pdev_idx); + + return 0; +} + +int ath12k_mac_mlo_setup(struct ath12k_hw_group *ag) +{ + struct ath12k_hw *ah; + struct ath12k *ar; + int ret; + int i, j; + + for (i = 0; i < ag->num_hw; i++) { + ah = ag->ah[i]; + if (!ah) + continue; + + for_each_ar(ah, ar, j) { + ar = &ah->radio[j]; + ret = __ath12k_mac_mlo_setup(ar); + if (ret) { + ath12k_err(ar->ab, "failed to setup MLO: %d\n", ret); + goto err_setup; + } } + } - ret = __ath12k_mac_register(ar); - if (ret) - goto err_cleanup; + return 0; - init_waitqueue_head(&ar->txmgmt_empty_waitq); - idr_init(&ar->txmgmt_idr); - spin_lock_init(&ar->txmgmt_idr_lock); +err_setup: + for (i = i - 1; i >= 0; i--) { + ah = ag->ah[i]; + if (!ah) + continue; + + for (j = j - 1; j >= 0; j--) { + ar = &ah->radio[j]; + if (!ar) + continue; + + __ath12k_mac_mlo_teardown(ar); + } } - /* Initialize channel counters frequency value in hertz */ - ab->cc_freq_hz = 320000; - ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1; + return ret; +} + +void ath12k_mac_mlo_teardown(struct ath12k_hw_group *ag) +{ + struct ath12k_hw *ah; + struct ath12k *ar; + int ret, i, j; + + for (i = 0; i < ag->num_hw; i++) { + ah = ag->ah[i]; + if (!ah) + continue; + + for_each_ar(ah, ar, j) { + ar = &ah->radio[j]; + ret = __ath12k_mac_mlo_teardown(ar); + if (ret) { + ath12k_err(ar->ab, "failed to teardown MLO: %d\n", ret); + break; + } + } + } +} + +int ath12k_mac_register(struct ath12k_hw_group *ag) +{ + struct ath12k_base *ab = ag->ab[0]; + struct ath12k_hw *ah; + int i; + int ret; + + for (i = 0; i < ag->num_hw; i++) { + ah = ath12k_ag_to_ah(ag, i); + + ret = ath12k_mac_hw_register(ah); + if (ret) + goto err; + } + + set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags); return 0; -err_cleanup: +err: for (i = i - 1; i >= 0; i--) { - pdev = &ab->pdevs[i]; - ar = pdev->ar; - __ath12k_mac_unregister(ar); + ah = ath12k_ag_to_ah(ag, i); + if (!ah) + continue; + + ath12k_mac_hw_unregister(ah); } return ret; } -int ath12k_mac_allocate(struct ath12k_base *ab) +void ath12k_mac_unregister(struct ath12k_hw_group *ag) +{ + struct ath12k_base *ab = ag->ab[0]; + struct ath12k_hw *ah; + int i; + + clear_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags); + + for (i = ag->num_hw - 1; i >= 0; i--) { + ah = ath12k_ag_to_ah(ag, i); + if (!ah) + continue; + + ath12k_mac_hw_unregister(ah); + } +} + +static void ath12k_mac_hw_destroy(struct ath12k_hw *ah) +{ + ieee80211_free_hw(ah->hw); +} + +static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_hw_group *ag, + struct ath12k_pdev_map *pdev_map, + u8 num_pdev_map) { struct ieee80211_hw *hw; struct ath12k *ar; + struct ath12k_base *ab; struct ath12k_pdev *pdev; - int ret; + struct ath12k_hw *ah; int i; + u8 pdev_idx; - if 
(test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) - return 0; + hw = ieee80211_alloc_hw(struct_size(ah, radio, num_pdev_map), + &ath12k_ops); + if (!hw) + return NULL; - for (i = 0; i < ab->num_radios; i++) { - pdev = &ab->pdevs[i]; - hw = ieee80211_alloc_hw(sizeof(struct ath12k), &ath12k_ops); - if (!hw) { - ath12k_warn(ab, "failed to allocate mac80211 hw device\n"); - ret = -ENOMEM; - goto err_free_mac; - } + ah = ath12k_hw_to_ah(hw); + ah->hw = hw; + ah->num_radio = num_pdev_map; + + mutex_init(&ah->hw_mutex); + INIT_LIST_HEAD(&ah->ml_peers); + + for (i = 0; i < num_pdev_map; i++) { + ab = pdev_map[i].ab; + pdev_idx = pdev_map[i].pdev_idx; + pdev = &ab->pdevs[pdev_idx]; - ar = hw->priv; - ar->hw = hw; + ar = ath12k_ah_to_ar(ah, i); + ar->ah = ah; ar->ab = ab; + ar->hw_link_id = pdev->hw_link_id; ar->pdev = pdev; - ar->pdev_idx = i; - ar->lmac_id = ath12k_hw_get_mac_from_pdev_id(ab->hw_params, i); + ar->pdev_idx = pdev_idx; + pdev->ar = ar; - ar->wmi = &ab->wmi_ab.wmi[i]; - /* FIXME: wmi[0] is already initialized during attach, - * Should we do this again? - */ - ath12k_wmi_pdev_attach(ab, i); + ag->hw_links[ar->hw_link_id].device_id = ab->device_id; + ag->hw_links[ar->hw_link_id].pdev_idx = pdev_idx; - ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask; - ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask; - ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask); - ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask); + ath12k_mac_setup(ar); + ath12k_dp_pdev_pre_alloc(ar); + } - pdev->ar = ar; - spin_lock_init(&ar->data_lock); - INIT_LIST_HEAD(&ar->arvifs); - INIT_LIST_HEAD(&ar->ppdu_stats_info); - mutex_init(&ar->conf_mutex); - init_completion(&ar->vdev_setup_done); - init_completion(&ar->vdev_delete_done); - init_completion(&ar->peer_assoc_done); - init_completion(&ar->peer_delete_done); - init_completion(&ar->install_key_done); - init_completion(&ar->bss_survey_done); - init_completion(&ar->scan.started); - init_completion(&ar->scan.completed); - - INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work); - INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work); - - INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work); - skb_queue_head_init(&ar->wmi_mgmt_tx_queue); - clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags); + return ah; +} + +void ath12k_mac_destroy(struct ath12k_hw_group *ag) +{ + struct ath12k_pdev *pdev; + struct ath12k_base *ab = ag->ab[0]; + int i, j; + struct ath12k_hw *ah; + + for (i = 0; i < ag->num_devices; i++) { + ab = ag->ab[i]; + if (!ab) + continue; + + for (j = 0; j < ab->num_radios; j++) { + pdev = &ab->pdevs[j]; + if (!pdev->ar) + continue; + pdev->ar = NULL; + } + } + + for (i = 0; i < ag->num_hw; i++) { + ah = ath12k_ag_to_ah(ag, i); + if (!ah) + continue; + + ath12k_mac_hw_destroy(ah); + ath12k_ag_set_ah(ag, i, NULL); + } +} + +static void ath12k_mac_set_device_defaults(struct ath12k_base *ab) +{ + /* Initialize channel counters frequency value in hertz */ + ab->cc_freq_hz = 320000; + ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1; +} + +int ath12k_mac_allocate(struct ath12k_hw_group *ag) +{ + struct ath12k_pdev_map pdev_map[ATH12K_GROUP_MAX_RADIO]; + int mac_id, device_id, total_radio, num_hw; + struct ath12k_base *ab; + struct ath12k_hw *ah; + int ret, i, j; + u8 radio_per_hw; + + total_radio = 0; + for (i = 0; i < ag->num_devices; i++) { + ab = ag->ab[i]; + if (!ab) + continue; + + ath12k_mac_set_device_defaults(ab); + total_radio += ab->num_radios; + } + + if (!total_radio) + return -EINVAL; + + if 
(WARN_ON(total_radio > ATH12K_GROUP_MAX_RADIO))
+		return -ENOSPC;
+
+	/* All pdevs in a hardware group that participates in multi-link
+	 * operation are combined and registered as a single wiphy;
+	 * otherwise each pdev is registered separately.
+	 */
+	if (ag->mlo_capable)
+		radio_per_hw = total_radio;
+	else
+		radio_per_hw = 1;
+
+	num_hw = total_radio / radio_per_hw;
+
+	ag->num_hw = 0;
+	device_id = 0;
+	mac_id = 0;
+	for (i = 0; i < num_hw; i++) {
+		for (j = 0; j < radio_per_hw; j++) {
+			if (device_id >= ag->num_devices || !ag->ab[device_id]) {
+				ret = -ENOSPC;
+				goto err;
+			}
+
+			ab = ag->ab[device_id];
+			pdev_map[j].ab = ab;
+			pdev_map[j].pdev_idx = mac_id;
+			mac_id++;
+
+			/* If mac_id falls beyond the current device's MACs,
+			 * move to the next device
+			 */
+			if (mac_id >= ab->num_radios) {
+				mac_id = 0;
+				device_id++;
+			}
+		}
+
+		ab = pdev_map->ab;
+
+		ah = ath12k_mac_hw_allocate(ag, pdev_map, radio_per_hw);
+		if (!ah) {
+			ath12k_warn(ab, "failed to allocate mac80211 hw device for hw_idx %d\n",
+				    i);
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		ah->dev = ab->dev;
+
+		ag->ah[i] = ah;
+		ag->num_hw++;
 	}
 
 	return 0;
 
-err_free_mac:
-	ath12k_mac_destroy(ab);
+err:
+	for (i = i - 1; i >= 0; i--) {
+		ah = ath12k_ag_to_ah(ag, i);
+		if (!ah)
+			continue;
+
+		ath12k_mac_hw_destroy(ah);
+		ath12k_ag_set_ah(ag, i, NULL);
+	}
 
 	return ret;
 }
 
-void ath12k_mac_destroy(struct ath12k_base *ab)
+int ath12k_mac_vif_set_keepalive(struct ath12k_link_vif *arvif,
+				 enum wmi_sta_keepalive_method method,
+				 u32 interval)
 {
-	struct ath12k *ar;
-	struct ath12k_pdev *pdev;
-	int i;
+	struct wmi_sta_keepalive_arg arg = {};
+	struct ath12k *ar = arvif->ar;
+	int ret;
 
-	for (i = 0; i < ab->num_radios; i++) {
-		pdev = &ab->pdevs[i];
-		ar = pdev->ar;
-		if (!ar)
-			continue;
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+	if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA)
+		return 0;
 
-		ieee80211_free_hw(ar->hw);
-		pdev->ar = NULL;
+	if (!test_bit(WMI_TLV_SERVICE_STA_KEEP_ALIVE, ar->ab->wmi_ab.svc_map))
+		return 0;
+
+	arg.vdev_id = arvif->vdev_id;
+	arg.enabled = 1;
+	arg.method = method;
+	arg.interval = interval;
+
+	ret = ath12k_wmi_sta_keepalive(ar, &arg);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to set keepalive on vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
 	}
+
+	return 0;
 }
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef ATH12K_MAC_H
@@ -9,9 +9,13 @@
 
 #include <net/mac80211.h>
 #include <net/cfg80211.h>
+#include "wmi.h"
 
 struct ath12k;
 struct ath12k_base;
+struct ath12k_hw;
+struct ath12k_hw_group;
+struct ath12k_pdev_map;
 
 struct ath12k_generic_iter {
 	struct ath12k *ar;
@@ -38,19 +42,34 @@ struct ath12k_generic_iter {
 #define ATH12K_TX_POWER_MAX_VAL	70
 #define ATH12K_TX_POWER_MIN_VAL	0
 
+#define ATH12K_DEFAULT_LINK_ID	0
+#define ATH12K_INVALID_LINK_ID	255
+
+/* Default link placed after the IEEE 802.11 defined maximum link id
+ * limit, reserved for driver use.
+ */ +#define ATH12K_DEFAULT_SCAN_LINK IEEE80211_MLD_MAX_NUM_LINKS +#define ATH12K_NUM_MAX_LINKS (IEEE80211_MLD_MAX_NUM_LINKS + 1) + enum ath12k_supported_bw { ATH12K_BW_20 = 0, ATH12K_BW_40 = 1, ATH12K_BW_80 = 2, ATH12K_BW_160 = 3, + ATH12K_BW_320 = 4, +}; + +struct ath12k_mac_get_any_chanctx_conf_arg { + struct ath12k *ar; + struct ieee80211_chanctx_conf *chanctx_conf; }; extern const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default; -void ath12k_mac_destroy(struct ath12k_base *ab); -void ath12k_mac_unregister(struct ath12k_base *ab); -int ath12k_mac_register(struct ath12k_base *ab); -int ath12k_mac_allocate(struct ath12k_base *ab); +void ath12k_mac_destroy(struct ath12k_hw_group *ag); +void ath12k_mac_unregister(struct ath12k_hw_group *ag); +int ath12k_mac_register(struct ath12k_hw_group *ag); +int ath12k_mac_allocate(struct ath12k_hw_group *ag); int ath12k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx, u16 *rate); u8 ath12k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband, @@ -61,9 +80,9 @@ u8 ath12k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, void __ath12k_mac_scan_finish(struct ath12k *ar); void ath12k_mac_scan_finish(struct ath12k *ar); -struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id); -struct ath12k_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab, - u32 vdev_id); +struct ath12k_link_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id); +struct ath12k_link_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab, + u32 vdev_id); struct ath12k *ath12k_mac_get_ar_by_vdev_id(struct ath12k_base *ab, u32 vdev_id); struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id); @@ -73,4 +92,21 @@ int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx); enum rate_info_bw ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw); enum ath12k_supported_bw ath12k_mac_mac80211_bw_to_ath12k_bw(enum rate_info_bw bw); enum hal_encrypt_type ath12k_dp_tx_get_encrypt_type(u32 cipher); +int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable); +int ath12k_mac_rfkill_config(struct ath12k *ar); +int ath12k_mac_wait_tx_complete(struct ath12k *ar); +void ath12k_mac_handle_beacon(struct ath12k *ar, struct sk_buff *skb); +void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id); +int ath12k_mac_vif_set_keepalive(struct ath12k_link_vif *arvif, + enum wmi_sta_keepalive_method method, + u32 interval); +u8 ath12k_mac_get_target_pdev_id(struct ath12k *ar); +int ath12k_mac_mlo_setup(struct ath12k_hw_group *ag); +int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag); +void ath12k_mac_mlo_teardown(struct ath12k_hw_group *ag); +int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif); +void ath12k_mac_get_any_chanctx_conf_iter(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *conf, + void *data); + #endif @@ -1,11 +1,12 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include <linux/msi.h> #include <linux/pci.h> +#include <linux/firmware.h> #include "core.h" #include "debug.h" @@ -13,37 +14,12 @@ #include "pci.h" #define MHI_TIMEOUT_DEFAULT_MS 90000 +#define OTP_INVALID_BOARD_ID 0xFFFF +#define OTP_VALID_DUALMAC_BOARD_ID_MASK 0x1000 +#define MHI_CB_INVALID 0xff static const struct mhi_channel_config ath12k_mhi_channels_qcn9274[] = { { - .num = 0, - .name = "LOOPBACK", - .num_elements = 32, - .event_ring = 1, - .dir = DMA_TO_DEVICE, - .ee_mask = 0x4, - .pollcfg = 0, - .doorbell = MHI_DB_BRST_DISABLE, - .lpm_notify = false, - .offload_channel = false, - .doorbell_mode_switch = false, - .auto_queue = false, - }, - { - .num = 1, - .name = "LOOPBACK", - .num_elements = 32, - .event_ring = 1, - .dir = DMA_FROM_DEVICE, - .ee_mask = 0x4, - .pollcfg = 0, - .doorbell = MHI_DB_BRST_DISABLE, - .lpm_notify = false, - .offload_channel = false, - .doorbell_mode_switch = false, - .auto_queue = false, - }, - { .num = 20, .name = "IPCR", .num_elements = 32, @@ -109,34 +85,6 @@ const struct mhi_controller_config ath12k_mhi_config_qcn9274 = { static const struct mhi_channel_config ath12k_mhi_channels_wcn7850[] = { { - .num = 0, - .name = "LOOPBACK", - .num_elements = 32, - .event_ring = 0, - .dir = DMA_TO_DEVICE, - .ee_mask = 0x4, - .pollcfg = 0, - .doorbell = MHI_DB_BRST_DISABLE, - .lpm_notify = false, - .offload_channel = false, - .doorbell_mode_switch = false, - .auto_queue = false, - }, - { - .num = 1, - .name = "LOOPBACK", - .num_elements = 32, - .event_ring = 0, - .dir = DMA_FROM_DEVICE, - .ee_mask = 0x4, - .pollcfg = 0, - .doorbell = MHI_DB_BRST_DISABLE, - .lpm_notify = false, - .offload_channel = false, - .doorbell_mode_switch = false, - .auto_queue = false, - }, - { .num = 20, .name = "IPCR", .num_elements = 64, @@ -193,7 +141,7 @@ const struct mhi_controller_config ath12k_mhi_config_wcn7850 = { .max_channels = 128, .timeout_ms = 2000, .use_bounce_buf = false, - .buf_len = 0, + .buf_len = 8192, .num_channels = ARRAY_SIZE(ath12k_mhi_channels_wcn7850), .ch_cfg = ath12k_mhi_channels_wcn7850, .num_events = ARRAY_SIZE(ath12k_mhi_events_wcn7850), @@ -251,6 +199,7 @@ static int ath12k_mhi_get_msi(struct ath12k_pci *ab_pci) u32 user_base_data, base_vector; int ret, num_vectors, i; int *irq; + unsigned int msi_data; ret = ath12k_pci_get_user_msi_assignment(ab, "MHI", &num_vectors, @@ -265,9 +214,15 @@ static int ath12k_mhi_get_msi(struct ath12k_pci *ab_pci) if (!irq) return -ENOMEM; - for (i = 0; i < num_vectors; i++) - irq[i] = ath12k_pci_get_msi_irq(ab->dev, - base_vector + i); + msi_data = base_vector; + for (i = 0; i < num_vectors; i++) { + if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + irq[i] = ath12k_pci_get_msi_irq(ab->dev, + msi_data++); + else + irq[i] = ath12k_pci_get_msi_irq(ab->dev, + msi_data); + } ab_pci->mhi_ctrl->irq = irq; ab_pci->mhi_ctrl->nr_irqs = num_vectors; @@ -314,6 +269,7 @@ static void ath12k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl, enum mhi_callback cb) { struct ath12k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev); + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); ath12k_dbg(ab, ATH12K_DBG_BOOT, "mhi notify status reason %s\n", ath12k_mhi_op_callback_to_str(cb)); @@ -323,12 +279,20 @@ static void ath12k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl, ath12k_warn(ab, "firmware crashed: MHI_CB_SYS_ERROR\n"); break; case MHI_CB_EE_RDDM: + if (ab_pci->mhi_pre_cb == MHI_CB_EE_RDDM) { + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "do not queue again for consecutive RDDM event\n"); + break; + } + if 
(!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags))) queue_work(ab->workqueue_aux, &ab->reset_work); break; default: break; } + + ab_pci->mhi_pre_cb = cb; } static int ath12k_mhi_op_read_reg(struct mhi_controller *mhi_cntrl, @@ -351,29 +315,69 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci) { struct ath12k_base *ab = ab_pci->ab; struct mhi_controller *mhi_ctrl; + unsigned int board_id; int ret; + bool dualmac = false; mhi_ctrl = mhi_alloc_controller(); if (!mhi_ctrl) return -ENOMEM; - ath12k_core_create_firmware_path(ab, ATH12K_AMSS_FILE, - ab_pci->amss_path, - sizeof(ab_pci->amss_path)); - + ab_pci->mhi_pre_cb = MHI_CB_INVALID; ab_pci->mhi_ctrl = mhi_ctrl; mhi_ctrl->cntrl_dev = ab->dev; - mhi_ctrl->fw_image = ab_pci->amss_path; mhi_ctrl->regs = ab->mem; mhi_ctrl->reg_len = ab->mem_len; + mhi_ctrl->rddm_size = ab->hw_params->rddm_size; + + if (ab->hw_params->otp_board_id_register) { + board_id = + ath12k_pci_read32(ab, ab->hw_params->otp_board_id_register); + board_id = u32_get_bits(board_id, OTP_BOARD_ID_MASK); + + if (!board_id || (board_id == OTP_INVALID_BOARD_ID)) { + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "failed to read board id\n"); + } else if (board_id & OTP_VALID_DUALMAC_BOARD_ID_MASK) { + dualmac = true; + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "dualmac fw selected for board id: %x\n", board_id); + } + } + + if (dualmac) { + if (ab->fw.amss_dualmac_data && ab->fw.amss_dualmac_len > 0) { + /* use MHI firmware file from firmware-N.bin */ + mhi_ctrl->fw_data = ab->fw.amss_dualmac_data; + mhi_ctrl->fw_sz = ab->fw.amss_dualmac_len; + } else { + ath12k_warn(ab, "dualmac firmware IE not present in firmware-N.bin\n"); + ret = -ENOENT; + goto free_controller; + } + } else { + if (ab->fw.amss_data && ab->fw.amss_len > 0) { + /* use MHI firmware file from firmware-N.bin */ + mhi_ctrl->fw_data = ab->fw.amss_data; + mhi_ctrl->fw_sz = ab->fw.amss_len; + } else { + /* use the old separate mhi.bin MHI firmware file */ + ath12k_core_create_firmware_path(ab, ATH12K_AMSS_FILE, + ab_pci->amss_path, + sizeof(ab_pci->amss_path)); + mhi_ctrl->fw_image = ab_pci->amss_path; + } + } ret = ath12k_mhi_get_msi(ab_pci); if (ret) { ath12k_err(ab, "failed to get msi for mhi\n"); - mhi_free_controller(mhi_ctrl); - return ret; + goto free_controller; } + if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING; + mhi_ctrl->iova_start = 0; mhi_ctrl->iova_stop = 0xffffffff; mhi_ctrl->sbl_size = SZ_512K; @@ -388,11 +392,15 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci) ret = mhi_register_controller(mhi_ctrl, ab->hw_params->mhi_config); if (ret) { ath12k_err(ab, "failed to register to mhi bus, err = %d\n", ret); - mhi_free_controller(mhi_ctrl); - return ret; + goto free_controller; } return 0; + +free_controller: + mhi_free_controller(mhi_ctrl); + ab_pci->mhi_ctrl = NULL; + return ret; } void ath12k_mhi_unregister(struct ath12k_pci *ab_pci) @@ -416,6 +424,8 @@ static char *ath12k_mhi_state_to_str(enum ath12k_mhi_state mhi_state) return "POWER_ON"; case ATH12K_MHI_POWER_OFF: return "POWER_OFF"; + case ATH12K_MHI_POWER_OFF_KEEP_DEV: + return "POWER_OFF_KEEP_DEV"; case ATH12K_MHI_FORCE_POWER_OFF: return "FORCE_POWER_OFF"; case ATH12K_MHI_SUSPEND: @@ -447,6 +457,7 @@ static void ath12k_mhi_set_state_bit(struct ath12k_pci *ab_pci, set_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state); break; case ATH12K_MHI_POWER_OFF: + case ATH12K_MHI_POWER_OFF_KEEP_DEV: case ATH12K_MHI_FORCE_POWER_OFF: clear_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state); 
clear_bit(ATH12K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state);
@@ -490,6 +501,7 @@ static int ath12k_mhi_check_state_bit(struct ath12k_pci *ab_pci,
 			return 0;
 		break;
 	case ATH12K_MHI_POWER_OFF:
+	case ATH12K_MHI_POWER_OFF_KEEP_DEV:
 	case ATH12K_MHI_SUSPEND:
 		if (test_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state) &&
 		    !test_bit(ATH12K_MHI_SUSPEND, &ab_pci->mhi_state))
@@ -540,12 +552,27 @@ static int ath12k_mhi_set_state(struct ath12k_pci *ab_pci,
 		ret = 0;
 		break;
 	case ATH12K_MHI_POWER_ON:
-		ret = mhi_async_power_up(ab_pci->mhi_ctrl);
+		/* In case of resume, QRTR's resume_early() is called
+		 * right after ath12k's resume_early(). Since QRTR requires
+		 * the MHI mission mode state when preparing IPCR channels
+		 * (see ee_mask of that channel), we need to use the 'sync'
+		 * version here to make sure MHI is in that state when we
+		 * return. Otherwise QRTR might resume before that state is
+		 * reached, and as a result it fails.
+		 *
+		 * The 'sync' version works for the non-resume (normal power
+		 * on) case as well.
+		 */
+		ret = mhi_sync_power_up(ab_pci->mhi_ctrl);
 		break;
 	case ATH12K_MHI_POWER_OFF:
 		mhi_power_down(ab_pci->mhi_ctrl, true);
 		ret = 0;
 		break;
+	case ATH12K_MHI_POWER_OFF_KEEP_DEV:
+		mhi_power_down_keep_dev(ab_pci->mhi_ctrl, true);
+		ret = 0;
+		break;
 	case ATH12K_MHI_FORCE_POWER_OFF:
 		mhi_power_down(ab_pci->mhi_ctrl, false);
 		ret = 0;
@@ -599,9 +626,17 @@ out:
 	return ret;
 }
 
-void ath12k_mhi_stop(struct ath12k_pci *ab_pci)
+void ath12k_mhi_stop(struct ath12k_pci *ab_pci, bool is_suspend)
 {
-	ath12k_mhi_set_state(ab_pci, ATH12K_MHI_POWER_OFF);
+	/* During suspend we need to use the mhi_power_down_keep_dev()
+	 * workaround, otherwise ath12k_core_resume() will time out
+	 * during resume.
+	 */
+	if (is_suspend)
+		ath12k_mhi_set_state(ab_pci, ATH12K_MHI_POWER_OFF_KEEP_DEV);
+	else
+		ath12k_mhi_set_state(ab_pci, ATH12K_MHI_POWER_OFF);
+
 	ath12k_mhi_set_state(ab_pci, ATH12K_MHI_DEINIT);
 }
 
@@ -614,3 +649,8 @@ void ath12k_mhi_resume(struct ath12k_pci *ab_pci)
 {
 	ath12k_mhi_set_state(ab_pci, ATH12K_MHI_RESUME);
 }
+
+void ath12k_mhi_coredump(struct mhi_controller *mhi_ctrl, bool in_panic)
+{
+	mhi_download_rddm_image(mhi_ctrl, in_panic);
+}
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */
 #ifndef _ATH12K_MHI_H
 #define _ATH12K_MHI_H
@@ -22,6 +22,7 @@ enum ath12k_mhi_state {
 	ATH12K_MHI_DEINIT,
 	ATH12K_MHI_POWER_ON,
 	ATH12K_MHI_POWER_OFF,
+	ATH12K_MHI_POWER_OFF_KEEP_DEV,
 	ATH12K_MHI_FORCE_POWER_OFF,
 	ATH12K_MHI_SUSPEND,
 	ATH12K_MHI_RESUME,
@@ -34,7 +35,7 @@ extern const struct mhi_controller_config ath12k_mhi_config_qcn9274;
 extern const struct mhi_controller_config ath12k_mhi_config_wcn7850;
 
 int ath12k_mhi_start(struct ath12k_pci *ar_pci);
-void ath12k_mhi_stop(struct ath12k_pci *ar_pci);
+void ath12k_mhi_stop(struct ath12k_pci *ar_pci, bool is_suspend);
 int ath12k_mhi_register(struct ath12k_pci *ar_pci);
 void ath12k_mhi_unregister(struct ath12k_pci *ar_pci);
 void ath12k_mhi_set_mhictrl_reset(struct ath12k_base *ab);
@@ -42,5 +43,5 @@ void ath12k_mhi_clear_vector(struct ath12k_base *ab);
 
 void ath12k_mhi_suspend(struct ath12k_pci *ar_pci);
 void ath12k_mhi_resume(struct ath12k_pci *ar_pci);
-
+void ath12k_mhi_coredump(struct mhi_controller *mhi_ctrl, bool in_panic);
 #endif
diff --git a/p2p.c b/p2p.c
new file mode 100644
index 000000000000..84cccf7d91e7
--- /dev/null
+++ b/p2p.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <net/mac80211.h>
+#include "core.h"
+#include "mac.h"
+#include "p2p.h"
+
+static void ath12k_p2p_noa_ie_fill(u8 *data, size_t len,
+				   const struct ath12k_wmi_p2p_noa_info *noa)
+{
+	struct ieee80211_p2p_noa_attr *noa_attr;
+	u8 ctwindow = le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_CTWIN_TU);
+	bool oppps = le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_OPP_PS);
+	__le16 *noa_attr_len;
+	u16 attr_len;
+	u8 noa_descriptors = le32_get_bits(noa->noa_attr,
+					   WMI_P2P_NOA_INFO_DESC_NUM);
+	int i;
+
+	/* P2P IE */
+	data[0] = WLAN_EID_VENDOR_SPECIFIC;
+	data[1] = len - 2;
+	data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+	data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+	data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+	data[5] = WLAN_OUI_TYPE_WFA_P2P;
+
+	/* NOA ATTR */
+	data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
+	noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
+	noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
+
+	noa_attr->index = le32_get_bits(noa->noa_attr,
+					WMI_P2P_NOA_INFO_INDEX);
+	noa_attr->oppps_ctwindow = ctwindow;
+	if (oppps)
+		noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
+
+	for (i = 0; i < noa_descriptors; i++) {
+		noa_attr->desc[i].count =
+			__le32_to_cpu(noa->descriptors[i].type_count);
+		noa_attr->desc[i].duration = noa->descriptors[i].duration;
+		noa_attr->desc[i].interval = noa->descriptors[i].interval;
+		noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
+	}
+
+	attr_len = 2; /* index + oppps_ctwindow */
+	attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+	*noa_attr_len = __cpu_to_le16(attr_len);
+}
+
+static size_t ath12k_p2p_noa_ie_len_compute(const struct ath12k_wmi_p2p_noa_info *noa)
+{
+	size_t len = 0;
+
+	if (!(le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM)) &&
+	    !(le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_OPP_PS)))
+		return 0;
+
+	len += 1 + 1 + 4; /* EID + len + OUI */
+	len += 1 + 2; /* noa attr + attr len */
+	len += 1 + 1; /* index + oppps_ctwindow */
+	len += le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM) *
+		sizeof(struct ieee80211_p2p_noa_desc);
+
+	return len;
+}
+
+static void ath12k_p2p_noa_ie_assign(struct ath12k_link_vif *arvif, void *ie,
+				     size_t len)
+{
+	struct ath12k *ar = arvif->ar;
+
+	lockdep_assert_held(&ar->data_lock);
+
+	kfree(arvif->ahvif->u.ap.noa_data);
+
+	arvif->ahvif->u.ap.noa_data = ie;
+	arvif->ahvif->u.ap.noa_len = len;
+}
+
+static void __ath12k_p2p_noa_update(struct ath12k_link_vif *arvif,
+				    const struct ath12k_wmi_p2p_noa_info *noa)
+{
+	struct ath12k *ar = arvif->ar;
+	void *ie;
+	size_t len;
+
+	lockdep_assert_held(&ar->data_lock);
+
+	ath12k_p2p_noa_ie_assign(arvif, NULL, 0);
+
+	len = ath12k_p2p_noa_ie_len_compute(noa);
+	if (!len)
+		return;
+
+	ie = kmalloc(len, GFP_ATOMIC);
+	if (!ie)
+		return;
+
+	ath12k_p2p_noa_ie_fill(ie, len, noa);
+	ath12k_p2p_noa_ie_assign(arvif, ie, len);
+}
+
+void ath12k_p2p_noa_update(struct ath12k_link_vif *arvif,
+			   const struct ath12k_wmi_p2p_noa_info *noa)
+{
+	struct ath12k *ar = arvif->ar;
+
+	spin_lock_bh(&ar->data_lock);
+	__ath12k_p2p_noa_update(arvif, noa);
+	spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath12k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
+					    struct ieee80211_vif *vif)
+{
+	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+	struct ath12k_p2p_noa_arg *arg = data;
+	struct ath12k_link_vif *arvif;
+
+	WARN_ON(!rcu_read_lock_any_held());
+	arvif = &ahvif->deflink;
+	if (arvif->ar != arg->ar || arvif->vdev_id != arg->vdev_id)
+		return;
+
+	ath12k_p2p_noa_update(arvif, arg->noa);
+}
+
+void ath12k_p2p_noa_update_by_vdev_id(struct ath12k *ar, u32 vdev_id,
+				      const struct ath12k_wmi_p2p_noa_info *noa)
+{
+	struct ath12k_p2p_noa_arg arg = {
+		.vdev_id = vdev_id,
+		.ar = ar,
+		.noa = noa,
+	};
+
+	ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   ath12k_p2p_noa_update_vdev_iter,
+						   &arg);
+}
diff --git a/p2p.h b/p2p.h
new file mode 100644
index 000000000000..03ee877e6d6b
--- /dev/null
+++ b/p2p.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_P2P_H
+#define ATH12K_P2P_H
+
+#include "wmi.h"
+
+struct ath12k_wmi_p2p_noa_info;
+
+struct ath12k_p2p_noa_arg {
+	u32 vdev_id;
+	struct ath12k *ar;
+	const struct ath12k_wmi_p2p_noa_info *noa;
+};
+
+void ath12k_p2p_noa_update(struct ath12k_link_vif *arvif,
+			   const struct ath12k_wmi_p2p_noa_info *noa);
+void ath12k_p2p_noa_update_by_vdev_id(struct ath12k *ar, u32 vdev_id,
+				      const struct ath12k_wmi_p2p_noa_info *noa);
+
+#endif
@@ -1,12 +1,14 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/ #include <linux/module.h> #include <linux/msi.h> #include <linux/pci.h> +#include <linux/time.h> +#include <linux/vmalloc.h> #include "pci.h" #include "core.h" @@ -39,6 +41,10 @@ #define QCN9274_DEVICE_ID 0x1109 #define WCN7850_DEVICE_ID 0x1107 +#define PCIE_LOCAL_REG_QRTR_NODE_ID 0x1E03164 +#define DOMAIN_NUMBER_MASK GENMASK(7, 4) +#define BUS_NUMBER_MASK GENMASK(3, 0) + static const struct pci_device_id ath12k_pci_id_table[] = { { PCI_VDEVICE(QCOM, QCN9274_DEVICE_ID) }, { PCI_VDEVICE(QCOM, WCN7850_DEVICE_ID) }, @@ -60,6 +66,17 @@ static const struct ath12k_msi_config ath12k_msi_config[] = { }, }; +static const struct ath12k_msi_config msi_config_one_msi = { + .total_vectors = 1, + .total_users = 4, + .users = (struct ath12k_msi_user[]) { + { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, + { .name = "CE", .num_vectors = 1, .base_vector = 0 }, + { .name = "WAKE", .num_vectors = 1, .base_vector = 0 }, + { .name = "DP", .num_vectors = 1, .base_vector = 0 }, + }, +}; + static const char *irq_name[ATH12K_IRQ_NUM_MAX] = { "bhi", "mhi-er0", @@ -190,18 +207,17 @@ static u32 ath12k_pci_get_window_start(struct ath12k_base *ab, /* If offset lies within CE register range, use 2nd window */ else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK) window_start = 2 * WINDOW_START; - /* If offset lies within PCI_BAR_WINDOW0_BASE and within PCI_SOC_PCI_REG_BASE - * use 0th window - */ - else if (((offset ^ PCI_BAR_WINDOW0_BASE) < WINDOW_RANGE_MASK) && - !((offset ^ PCI_SOC_PCI_REG_BASE) < PCI_SOC_RANGE_MASK)) - window_start = 0; else window_start = WINDOW_START; return window_start; } +static inline bool ath12k_pci_is_offset_within_mhi_region(u32 offset) +{ + return (offset >= PCI_MHIREGLEN_REG && offset <= PCI_MHI_REGION_END); +} + static void ath12k_pci_soc_global_reset(struct ath12k_base *ab) { u32 val, delay; @@ -336,6 +352,7 @@ static void ath12k_pci_free_ext_irq(struct ath12k_base *ab) free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp); netif_napi_del(&irq_grp->napi); + free_netdev(irq_grp->napi_ndev); } } @@ -355,16 +372,30 @@ static void ath12k_pci_free_irq(struct ath12k_base *ab) static void ath12k_pci_ce_irq_enable(struct ath12k_base *ab, u16 ce_id) { + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); u32 irq_idx; + /* In case of one MSI vector, we handle irq enable/disable in a + * uniform way since we only have one irq + */ + if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + return; + irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id; enable_irq(ab->irq_num[irq_idx]); } static void ath12k_pci_ce_irq_disable(struct ath12k_base *ab, u16 ce_id) { + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); u32 irq_idx; + /* In case of one MSI vector, we handle irq enable/disable in a + * uniform way since we only have one irq + */ + if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + return; + irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id; disable_irq_nosync(ab->irq_num[irq_idx]); } @@ -373,6 +404,8 @@ static void ath12k_pci_ce_irqs_disable(struct ath12k_base *ab) { int i; + clear_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags); + for (i = 0; i < ab->hw_params->ce_count; i++) { if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; @@ -394,42 +427,59 @@ static void ath12k_pci_sync_ce_irqs(struct ath12k_base *ab) } } -static void ath12k_pci_ce_tasklet(struct tasklet_struct *t) +static void ath12k_pci_ce_workqueue(struct work_struct *work) { - struct ath12k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq); + struct ath12k_ce_pipe *ce_pipe = 
from_work(ce_pipe, work, intr_wq); + int irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num; ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num); - ath12k_pci_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num); + enable_irq(ce_pipe->ab->irq_num[irq_idx]); } static irqreturn_t ath12k_pci_ce_interrupt_handler(int irq, void *arg) { struct ath12k_ce_pipe *ce_pipe = arg; + struct ath12k_base *ab = ce_pipe->ab; + int irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num; + + if (!test_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags)) + return IRQ_HANDLED; /* last interrupt received for this CE */ ce_pipe->timestamp = jiffies; - ath12k_pci_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num); - tasklet_schedule(&ce_pipe->intr_tq); + disable_irq_nosync(ab->irq_num[irq_idx]); + + queue_work(system_bh_wq, &ce_pipe->intr_wq); return IRQ_HANDLED; } static void ath12k_pci_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp) { + struct ath12k_pci *ab_pci = ath12k_pci_priv(irq_grp->ab); int i; + /* In case of one MSI vector, we handle irq enable/disable + * in a uniform way since we only have one irq + */ + if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + return; + for (i = 0; i < irq_grp->num_irq; i++) disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); } -static void __ath12k_pci_ext_irq_disable(struct ath12k_base *sc) +static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab) { int i; + if (!test_and_clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags)) + return; + for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { - struct ath12k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i]; + struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; ath12k_pci_ext_grp_disable(irq_grp); @@ -440,8 +490,15 @@ static void __ath12k_pci_ext_irq_disable(struct ath12k_base *sc) static void ath12k_pci_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp) { + struct ath12k_pci *ab_pci = ath12k_pci_priv(irq_grp->ab); int i; + /* In case of one MSI vector, we handle irq enable/disable in a + * uniform way since we only have one irq + */ + if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + return; + for (i = 0; i < irq_grp->num_irq; i++) enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); } @@ -467,11 +524,13 @@ static int ath12k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget) napi); struct ath12k_base *ab = irq_grp->ab; int work_done; + int i; work_done = ath12k_dp_service_srng(ab, irq_grp, budget); if (work_done < budget) { napi_complete_done(napi, work_done); - ath12k_pci_ext_grp_enable(irq_grp); + for (i = 0; i < irq_grp->num_irq; i++) + enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); } if (work_done > budget) @@ -483,13 +542,19 @@ static int ath12k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget) static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg) { struct ath12k_ext_irq_grp *irq_grp = arg; + struct ath12k_base *ab = irq_grp->ab; + int i; + + if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags)) + return IRQ_HANDLED; ath12k_dbg(irq_grp->ab, ATH12K_DBG_PCI, "ext irq:%d\n", irq); /* last interrupt received for this group */ irq_grp->timestamp = jiffies; - ath12k_pci_ext_grp_disable(irq_grp); + for (i = 0; i < irq_grp->num_irq; i++) + disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); napi_schedule(&irq_grp->napi); @@ -498,8 +563,10 @@ static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg) static int ath12k_pci_ext_irq_config(struct ath12k_base *ab) { - int i, j, ret, num_vectors = 0; + struct 
ath12k_pci *ab_pci = ath12k_pci_priv(ab); + int i, j, n, ret, num_vectors = 0; u32 user_base_data = 0, base_vector = 0, base_idx; + struct ath12k_ext_irq_grp *irq_grp; base_idx = ATH12K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX; ret = ath12k_pci_get_user_msi_assignment(ab, "DP", @@ -510,13 +577,18 @@ static int ath12k_pci_ext_irq_config(struct ath12k_base *ab) return ret; for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { - struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + irq_grp = &ab->ext_irq_grp[i]; u32 num_irq = 0; irq_grp->ab = ab; irq_grp->grp_id = i; - init_dummy_netdev(&irq_grp->napi_ndev); - netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi, + irq_grp->napi_ndev = alloc_netdev_dummy(0); + if (!irq_grp->napi_ndev) { + ret = -ENOMEM; + goto fail_allocate; + } + + netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi, ath12k_pci_ext_grp_napi_poll); if (ab->hw_params->ring_mask->tx[i] || @@ -544,23 +616,42 @@ static int ath12k_pci_ext_irq_config(struct ath12k_base *ab) irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY); ret = request_irq(irq, ath12k_pci_ext_interrupt_handler, - IRQF_SHARED, + ab_pci->irq_flags, "DP_EXT_IRQ", irq_grp); if (ret) { ath12k_err(ab, "failed request irq %d: %d\n", vector, ret); - return ret; + goto fail_request; } - - disable_irq_nosync(ab->irq_num[irq_idx]); } + ath12k_pci_ext_grp_disable(irq_grp); } return 0; + +fail_request: + /* i ->napi_ndev was properly allocated. Free it also */ + i += 1; +fail_allocate: + for (n = 0; n < i; n++) { + irq_grp = &ab->ext_irq_grp[n]; + free_netdev(irq_grp->napi_ndev); + } + return ret; +} + +static int ath12k_pci_set_irq_affinity_hint(struct ath12k_pci *ab_pci, + const struct cpumask *m) +{ + if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + return 0; + + return irq_set_affinity_hint(ab_pci->pdev->irq, m); } static int ath12k_pci_config_irq(struct ath12k_base *ab) { + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); struct ath12k_ce_pipe *ce_pipe; u32 msi_data_start; u32 msi_data_count, msi_data_idx; @@ -586,10 +677,10 @@ static int ath12k_pci_config_irq(struct ath12k_base *ab) irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i; - tasklet_setup(&ce_pipe->intr_tq, ath12k_pci_ce_tasklet); + INIT_WORK(&ce_pipe->intr_wq, ath12k_pci_ce_workqueue); ret = request_irq(irq, ath12k_pci_ce_interrupt_handler, - IRQF_SHARED, irq_name[irq_idx], + ab_pci->irq_flags, irq_name[irq_idx], ce_pipe); if (ret) { ath12k_err(ab, "failed to request irq %d: %d\n", @@ -614,18 +705,30 @@ static void ath12k_pci_init_qmi_ce_config(struct ath12k_base *ab) { struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg; + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); + struct pci_bus *bus = ab_pci->pdev->bus; + cfg->tgt_ce = ab->hw_params->target_ce_config; cfg->tgt_ce_len = ab->hw_params->target_ce_count; cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map; cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len; ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id; + + if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features)) { + ab_pci->qmi_instance = + u32_encode_bits(pci_domain_nr(bus), DOMAIN_NUMBER_MASK) | + u32_encode_bits(bus->number, BUS_NUMBER_MASK); + ab->qmi.service_ins_id += ab_pci->qmi_instance; + } } static void ath12k_pci_ce_irqs_enable(struct ath12k_base *ab) { int i; + set_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags); + for (i = 0; i < ab->hw_params->ce_count; i++) { if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; @@ -670,16 +773,27 @@ static int ath12k_pci_msi_alloc(struct ath12k_pci *ab_pci) 
msi_config->total_vectors, msi_config->total_vectors, PCI_IRQ_MSI); - if (num_vectors != msi_config->total_vectors) { - ath12k_err(ab, "failed to get %d MSI vectors, only %d available", - msi_config->total_vectors, num_vectors); - if (num_vectors >= 0) - return -EINVAL; - else - return num_vectors; + if (num_vectors == msi_config->total_vectors) { + set_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags); + ab_pci->irq_flags = IRQF_SHARED; + } else { + num_vectors = pci_alloc_irq_vectors(ab_pci->pdev, + 1, + 1, + PCI_IRQ_MSI); + if (num_vectors < 0) { + ret = -EINVAL; + goto reset_msi_config; + } + clear_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags); + ab_pci->msi_config = &msi_config_one_msi; + ab_pci->irq_flags = IRQF_SHARED | IRQF_NOBALANCING; + ath12k_dbg(ab, ATH12K_DBG_PCI, "request MSI one vector\n"); } + ath12k_info(ab, "MSI vectors: %d\n", num_vectors); + ath12k_pci_msi_disable(ab_pci); msi_desc = irq_get_msi_desc(ab_pci->pdev->irq); @@ -700,6 +814,7 @@ static int ath12k_pci_msi_alloc(struct ath12k_pci *ab_pci) free_msi_vector: pci_free_irq_vectors(ab_pci->pdev); +reset_msi_config: return ret; } @@ -708,6 +823,25 @@ static void ath12k_pci_msi_free(struct ath12k_pci *ab_pci) pci_free_irq_vectors(ab_pci->pdev); } +static int ath12k_pci_config_msi_data(struct ath12k_pci *ab_pci) +{ + struct msi_desc *msi_desc; + + msi_desc = irq_get_msi_desc(ab_pci->pdev->irq); + if (!msi_desc) { + ath12k_err(ab_pci->ab, "msi_desc is NULL!\n"); + pci_free_irq_vectors(ab_pci->pdev); + return -EINVAL; + } + + ab_pci->msi_ep_base_data = msi_desc->msg.data; + + ath12k_dbg(ab_pci->ab, ATH12K_DBG_PCI, "pci after request_irq msi_ep_base_data %d\n", + ab_pci->msi_ep_base_data); + + return 0; +} + static int ath12k_pci_claim(struct ath12k_pci *ab_pci, struct pci_dev *pdev) { struct ath12k_base *ab = ab_pci->ab; @@ -758,7 +892,7 @@ static int ath12k_pci_claim(struct ath12k_pci *ab_pci, struct pci_dev *pdev) goto release_region; } - ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot pci_mem 0x%pK\n", ab->mem); + ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot pci_mem 0x%p\n", ab->mem); return 0; release_region: @@ -794,20 +928,43 @@ static void ath12k_pci_aspm_disable(struct ath12k_pci *ab_pci) u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1)); /* disable L0s and L1 */ - pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL, - ab_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC); + pcie_capability_clear_word(ab_pci->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC); set_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags); } +static void ath12k_pci_update_qrtr_node_id(struct ath12k_base *ab) +{ + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); + u32 reg; + + /* On platforms with two or more identical mhi devices, qmi service run + * with identical qrtr-node-id. Because of this identical ID qrtr-lookup + * cannot register more than one qmi service with identical node ID. + * + * This generates a unique instance ID from PCIe domain number and bus number, + * writes to the given register, it is available for firmware when the QMI service + * is spawned. 
+ */ + reg = PCIE_LOCAL_REG_QRTR_NODE_ID & WINDOW_RANGE_MASK; + ath12k_pci_write32(ab, reg, ab_pci->qmi_instance); + + ath12k_dbg(ab, ATH12K_DBG_PCI, "pci reg 0x%x instance 0x%x read val 0x%x\n", + reg, ab_pci->qmi_instance, ath12k_pci_read32(ab, reg)); +} + static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci) { - if (test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags)) - pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL, - ab_pci->link_ctl); + if (ab_pci->ab->hw_params->supports_aspm && + test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags)) + pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC, + ab_pci->link_ctl & + PCI_EXP_LNKCTL_ASPMC); } -static void ath12k_pci_kill_tasklets(struct ath12k_base *ab) +static void ath12k_pci_cancel_workqueue(struct ath12k_base *ab) { int i; @@ -817,7 +974,7 @@ static void ath12k_pci_kill_tasklets(struct ath12k_base *ab) if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; - tasklet_kill(&ce_pipe->intr_tq); + cancel_work_sync(&ce_pipe->intr_wq); } } @@ -825,7 +982,7 @@ static void ath12k_pci_ce_irq_disable_sync(struct ath12k_base *ab) { ath12k_pci_ce_irqs_disable(ab); ath12k_pci_sync_ce_irqs(ab); - ath12k_pci_kill_tasklets(ab); + ath12k_pci_cancel_workqueue(ab); } int ath12k_pci_map_service_to_pipe(struct ath12k_base *ab, u16 service_id, @@ -889,11 +1046,11 @@ int ath12k_pci_get_user_msi_assignment(struct ath12k_base *ab, char *user_name, for (idx = 0; idx < msi_config->total_users; idx++) { if (strcmp(user_name, msi_config->users[idx].name) == 0) { *num_vectors = msi_config->users[idx].num_vectors; - *user_base_data = msi_config->users[idx].base_vector - + ab_pci->msi_ep_base_data; - *base_vector = msi_config->users[idx].base_vector; + *base_vector = msi_config->users[idx].base_vector; + *user_base_data = *base_vector + ab_pci->msi_ep_base_data; - ath12k_dbg(ab, ATH12K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n", + ath12k_dbg(ab, ATH12K_DBG_PCI, + "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n", user_name, *num_vectors, *user_base_data, *base_vector); @@ -960,10 +1117,15 @@ void ath12k_pci_ext_irq_enable(struct ath12k_base *ab) napi_enable(&irq_grp->napi); ath12k_pci_ext_grp_enable(irq_grp); } + + set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags); } void ath12k_pci_ext_irq_disable(struct ath12k_base *ab) { + if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags)) + return; + __ath12k_pci_ext_irq_disable(ab); ath12k_pci_sync_ext_irqs(ab); } @@ -988,6 +1150,11 @@ int ath12k_pci_hif_resume(struct ath12k_base *ab) void ath12k_pci_stop(struct ath12k_base *ab) { + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); + + if (!test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags)) + return; + ath12k_pci_ce_irq_disable_sync(ab); ath12k_ce_cleanup_pipes(ab); } @@ -998,7 +1165,10 @@ int ath12k_pci_start(struct ath12k_base *ab) set_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags); - ath12k_pci_aspm_restore(ab_pci); + if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + ath12k_pci_aspm_restore(ab_pci); + else + ath12k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n"); ath12k_pci_ce_irqs_enable(ab); ath12k_ce_rx_post_buf(ab); @@ -1030,15 +1200,17 @@ u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset) if (window_start == WINDOW_START) { spin_lock_bh(&ab_pci->window_lock); ath12k_pci_select_window(ab_pci, offset); - val = ioread32(ab->mem + window_start + - (offset & 
WINDOW_RANGE_MASK)); + + if (ath12k_pci_is_offset_within_mhi_region(offset)) { + offset = offset - PCI_MHIREGLEN_REG; + val = ioread32(ab->mem + + (offset & WINDOW_RANGE_MASK)); + } else { + val = ioread32(ab->mem + window_start + + (offset & WINDOW_RANGE_MASK)); + } spin_unlock_bh(&ab_pci->window_lock); } else { - if ((!window_start) && - (offset >= PCI_MHIREGLEN_REG && - offset <= PCI_MHI_REGION_END)) - offset = offset - PCI_MHIREGLEN_REG; - val = ioread32(ab->mem + window_start + (offset & WINDOW_RANGE_MASK)); } @@ -1075,15 +1247,17 @@ void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value) if (window_start == WINDOW_START) { spin_lock_bh(&ab_pci->window_lock); ath12k_pci_select_window(ab_pci, offset); - iowrite32(value, ab->mem + window_start + - (offset & WINDOW_RANGE_MASK)); + + if (ath12k_pci_is_offset_within_mhi_region(offset)) { + offset = offset - PCI_MHIREGLEN_REG; + iowrite32(value, ab->mem + + (offset & WINDOW_RANGE_MASK)); + } else { + iowrite32(value, ab->mem + window_start + + (offset & WINDOW_RANGE_MASK)); + } spin_unlock_bh(&ab_pci->window_lock); } else { - if ((!window_start) && - (offset >= PCI_MHIREGLEN_REG && - offset <= PCI_MHI_REGION_END)) - offset = offset - PCI_MHIREGLEN_REG; - iowrite32(value, ab->mem + window_start + (offset & WINDOW_RANGE_MASK)); } @@ -1095,6 +1269,186 @@ void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value) ab_pci->pci_ops->release(ab); } +#ifdef CONFIG_ATH12K_COREDUMP +static int ath12k_pci_coredump_calculate_size(struct ath12k_base *ab, u32 *dump_seg_sz) +{ + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); + struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl; + struct image_info *rddm_img, *fw_img; + struct ath12k_tlv_dump_data *dump_tlv; + enum ath12k_fw_crash_dump_type mem_type; + u32 len = 0, rddm_tlv_sz = 0, paging_tlv_sz = 0; + struct ath12k_dump_file_data *file_data; + int i; + + rddm_img = mhi_ctrl->rddm_image; + if (!rddm_img) { + ath12k_err(ab, "No RDDM dump found\n"); + return 0; + } + + fw_img = mhi_ctrl->fbc_image; + + for (i = 0; i < fw_img->entries ; i++) { + if (!fw_img->mhi_buf[i].buf) + continue; + + paging_tlv_sz += fw_img->mhi_buf[i].len; + } + dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA] = paging_tlv_sz; + + for (i = 0; i < rddm_img->entries; i++) { + if (!rddm_img->mhi_buf[i].buf) + continue; + + rddm_tlv_sz += rddm_img->mhi_buf[i].len; + } + dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA] = rddm_tlv_sz; + + for (i = 0; i < ab->qmi.mem_seg_count; i++) { + mem_type = ath12k_coredump_get_dump_type(ab->qmi.target_mem[i].type); + + if (mem_type == FW_CRASH_DUMP_NONE) + continue; + + if (mem_type == FW_CRASH_DUMP_TYPE_MAX) { + ath12k_dbg(ab, ATH12K_DBG_PCI, + "target mem region type %d not supported", + ab->qmi.target_mem[i].type); + continue; + } + + if (!ab->qmi.target_mem[i].paddr) + continue; + + dump_seg_sz[mem_type] += ab->qmi.target_mem[i].size; + } + + for (i = 0; i < FW_CRASH_DUMP_TYPE_MAX; i++) { + if (!dump_seg_sz[i]) + continue; + + len += sizeof(*dump_tlv) + dump_seg_sz[i]; + } + + if (len) + len += sizeof(*file_data); + + return len; +} + +static void ath12k_pci_coredump_download(struct ath12k_base *ab) +{ + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); + struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl; + struct image_info *rddm_img, *fw_img; + struct timespec64 timestamp; + int i, len, mem_idx; + enum ath12k_fw_crash_dump_type mem_type; + struct ath12k_dump_file_data *file_data; + struct ath12k_tlv_dump_data *dump_tlv; + size_t hdr_len = sizeof(*file_data); + void *buf; + u32 
dump_seg_sz[FW_CRASH_DUMP_TYPE_MAX] = { 0 };
+
+	ath12k_mhi_coredump(mhi_ctrl, false);
+
+	len = ath12k_pci_coredump_calculate_size(ab, dump_seg_sz);
+	if (!len) {
+		ath12k_warn(ab, "No crash dump data found for devcoredump");
+		return;
+	}
+
+	rddm_img = mhi_ctrl->rddm_image;
+	fw_img = mhi_ctrl->fbc_image;
+
+	/* dev_coredumpv() requires vmalloc data */
+	buf = vzalloc(len);
+	if (!buf)
+		return;
+
+	ab->dump_data = buf;
+	ab->ath12k_coredump_len = len;
+	file_data = ab->dump_data;
+	strscpy(file_data->df_magic, "ATH12K-FW-DUMP", sizeof(file_data->df_magic));
+	file_data->len = cpu_to_le32(len);
+	file_data->version = cpu_to_le32(ATH12K_FW_CRASH_DUMP_V2);
+	file_data->chip_id = cpu_to_le32(ab_pci->dev_id);
+	file_data->qrtr_id = cpu_to_le32(ab_pci->ab->qmi.service_ins_id);
+	file_data->bus_id = cpu_to_le32(pci_domain_nr(ab_pci->pdev->bus));
+	guid_gen(&file_data->guid);
+	ktime_get_real_ts64(&timestamp);
+	file_data->tv_sec = cpu_to_le64(timestamp.tv_sec);
+	file_data->tv_nsec = cpu_to_le64(timestamp.tv_nsec);
+	buf += hdr_len;
+	dump_tlv = buf;
+	dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_PAGING_DATA);
+	dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA]);
+	buf += COREDUMP_TLV_HDR_SIZE;
+
+	/* append all segments together as they are all part of a single contiguous
+	 * block of memory
+	 */
+	for (i = 0; i < fw_img->entries ; i++) {
+		if (!fw_img->mhi_buf[i].buf)
+			continue;
+
+		memcpy_fromio(buf, (void const __iomem *)fw_img->mhi_buf[i].buf,
+			      fw_img->mhi_buf[i].len);
+		buf += fw_img->mhi_buf[i].len;
+	}
+
+	dump_tlv = buf;
+	dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_RDDM_DATA);
+	dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA]);
+	buf += COREDUMP_TLV_HDR_SIZE;
+
+	/* append all segments together as they are all part of a single contiguous
+	 * block of memory
+	 */
+	for (i = 0; i < rddm_img->entries; i++) {
+		if (!rddm_img->mhi_buf[i].buf)
+			continue;
+
+		memcpy_fromio(buf, (void const __iomem *)rddm_img->mhi_buf[i].buf,
+			      rddm_img->mhi_buf[i].len);
+		buf += rddm_img->mhi_buf[i].len;
+	}
+
+	mem_idx = FW_CRASH_DUMP_REMOTE_MEM_DATA;
+	for (; mem_idx < FW_CRASH_DUMP_TYPE_MAX; mem_idx++) {
+		if (!dump_seg_sz[mem_idx] || mem_idx == FW_CRASH_DUMP_NONE)
+			continue;
+
+		dump_tlv = buf;
+		dump_tlv->type = cpu_to_le32(mem_idx);
+		dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[mem_idx]);
+		buf += COREDUMP_TLV_HDR_SIZE;
+
+		for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+			mem_type = ath12k_coredump_get_dump_type
+				   (ab->qmi.target_mem[i].type);
+
+			if (mem_type != mem_idx)
+				continue;
+
+			if (!ab->qmi.target_mem[i].paddr) {
+				ath12k_dbg(ab, ATH12K_DBG_PCI,
+					   "Skipping mem region type %d",
+					   ab->qmi.target_mem[i].type);
+				continue;
+			}
+
+			memcpy_fromio(buf, ab->qmi.target_mem[i].v.ioaddr,
+				      ab->qmi.target_mem[i].size);
+			buf += ab->qmi.target_mem[i].size;
+		}
+	}
+
+	queue_work(ab->workqueue, &ab->dump_work);
+}
+#endif
+
 int ath12k_pci_power_up(struct ath12k_base *ab)
 {
 	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
@@ -1111,6 +1465,9 @@ int ath12k_pci_power_up(struct ath12k_base *ab)
 
 	ath12k_pci_msi_enable(ab_pci);
 
+	if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features))
+		ath12k_pci_update_qrtr_node_id(ab);
+
 	ret = ath12k_mhi_start(ab_pci);
 	if (ret) {
 		ath12k_err(ab, "failed to start mhi: %d\n", ret);
@@ -1123,7 +1480,7 @@
 	return 0;
 }
 
-void ath12k_pci_power_down(struct ath12k_base *ab)
+void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend)
 {
 	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
 
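The coredump download path added above builds one flat blob for dev_coredumpv(): an ath12k_dump_file_data header, followed by one TLV per dump class (paging data, RDDM data, then the firmware memory-region classes), where each TLV payload is simply every segment of that class copied back to back. Below is a minimal, compilable userspace model of that framing; the struct shapes are simplified stand-ins for illustration (the real header also carries version, chip/qrtr/bus ids, a GUID and a timestamp), not the driver's definitions.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Simplified stand-ins, assumed shapes for illustration only */
	struct dump_file_hdr {
		char magic[32];		/* "ATH12K-FW-DUMP" */
		uint32_t len;		/* total length, header included */
	};

	struct dump_tlv_hdr {
		uint32_t type;		/* dump class, e.g. RDDM or paging data */
		uint32_t tlv_len;	/* payload bytes following this header */
	};

	/* Emit one TLV whose payload concatenates all segments of a class,
	 * mirroring how the driver walks the fw_img/rddm_img mhi_buf entries.
	 * Returns the cursor advanced past the TLV.
	 */
	static uint8_t *emit_tlv(uint8_t *cur, uint32_t type,
				 const uint8_t *const *segs,
				 const size_t *seg_len, int nsegs)
	{
		struct dump_tlv_hdr hdr = { .type = type, .tlv_len = 0 };
		uint8_t *payload = cur + sizeof(hdr);
		int i;

		for (i = 0; i < nsegs; i++) {
			memcpy(payload, segs[i], seg_len[i]);
			payload += seg_len[i];
			hdr.tlv_len += (uint32_t)seg_len[i];
		}
		/* header is written once the payload length is known */
		memcpy(cur, &hdr, sizeof(hdr));
		return payload;
	}

	int main(void)
	{
		static const uint8_t seg0[4] = { 1, 2, 3, 4 }, seg1[2] = { 5, 6 };
		const uint8_t *segs[] = { seg0, seg1 };
		const size_t seg_len[] = { sizeof(seg0), sizeof(seg1) };
		uint8_t buf[64];
		struct dump_file_hdr *file = (struct dump_file_hdr *)buf;
		uint8_t *end;

		strcpy(file->magic, "ATH12K-FW-DUMP");
		end = emit_tlv(buf + sizeof(*file), 1 /* class id */, segs, seg_len, 2);
		file->len = (uint32_t)(end - buf);
		printf("dump blob: %u bytes\n", file->len);
		return 0;
	}

The driver does the same walk twice, once in ath12k_pci_coredump_calculate_size() to size the vzalloc() buffer and once to fill it, which is why both loops must skip exactly the same empty segments.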
@@ -1132,11 +1489,18 @@ void ath12k_pci_power_down(struct ath12k_base *ab) ath12k_pci_force_wake(ab_pci->ab); ath12k_pci_msi_disable(ab_pci); - ath12k_mhi_stop(ab_pci); + ath12k_mhi_stop(ab_pci, is_suspend); clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags); ath12k_pci_sw_reset(ab_pci->ab, false); } +static int ath12k_pci_panic_handler(struct ath12k_base *ab) +{ + ath12k_pci_sw_reset(ab, false); + + return NOTIFY_OK; +} + static const struct ath12k_hif_ops ath12k_pci_hif_ops = { .start = ath12k_pci_start, .stop = ath12k_pci_stop, @@ -1154,6 +1518,10 @@ static const struct ath12k_hif_ops ath12k_pci_hif_ops = { .ce_irq_enable = ath12k_pci_hif_ce_irq_enable, .ce_irq_disable = ath12k_pci_hif_ce_irq_disable, .get_ce_msi_idx = ath12k_pci_get_ce_msi_idx, + .panic_handler = ath12k_pci_panic_handler, +#ifdef CONFIG_ATH12K_COREDUMP + .coredump_download = ath12k_pci_coredump_download, +#endif }; static @@ -1202,11 +1570,21 @@ static int ath12k_pci_probe(struct pci_dev *pdev, goto err_free_core; } + ath12k_dbg(ab, ATH12K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n", + pdev->vendor, pdev->device, + pdev->subsystem_vendor, pdev->subsystem_device); + + ab->id.vendor = pdev->vendor; + ab->id.device = pdev->device; + ab->id.subsystem_vendor = pdev->subsystem_vendor; + ab->id.subsystem_device = pdev->subsystem_device; + switch (pci_dev->device) { case QCN9274_DEVICE_ID: ab_pci->msi_config = &ath12k_msi_config[0]; ab->static_window_map = true; ab_pci->pci_ops = &ath12k_pci_ops_qcn9274; + ab->hal_rx_ops = &hal_rx_qcn9274_ops; ath12k_pci_read_hw_version(ab, &soc_hw_version_major, &soc_hw_version_minor); switch (soc_hw_version_major) { @@ -1225,9 +1603,11 @@ static int ath12k_pci_probe(struct pci_dev *pdev, } break; case WCN7850_DEVICE_ID: + ab->id.bdf_search = ATH12K_BDF_SEARCH_BUS_AND_BOARD; ab_pci->msi_config = &ath12k_msi_config[0]; ab->static_window_map = false; ab_pci->pci_ops = &ath12k_pci_ops_wcn7850; + ab->hal_rx_ops = &hal_rx_wcn7850_ops; ath12k_pci_read_hw_version(ab, &soc_hw_version_major, &soc_hw_version_minor); switch (soc_hw_version_major) { @@ -1260,10 +1640,16 @@ static int ath12k_pci_probe(struct pci_dev *pdev, if (ret) goto err_pci_msi_free; + ret = ath12k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0)); + if (ret) { + ath12k_err(ab, "failed to set irq affinity %d\n", ret); + goto err_pci_msi_free; + } + ret = ath12k_mhi_register(ab_pci); if (ret) { ath12k_err(ab, "failed to register mhi: %d\n", ret); - goto err_pci_msi_free; + goto err_irq_affinity_cleanup; } ret = ath12k_hal_srng_init(ab); @@ -1284,6 +1670,17 @@ static int ath12k_pci_probe(struct pci_dev *pdev, goto err_ce_free; } + /* kernel may allocate a dummy vector before request_irq and + * then allocate a real vector when request_irq is called. + * So get msi_data here again to avoid spurious interrupt + * as msi_data will configured to srngs. 
+ */ + ret = ath12k_pci_config_msi_data(ab_pci); + if (ret) { + ath12k_err(ab, "failed to config msi_data: %d\n", ret); + goto err_free_irq; + } + ret = ath12k_core_init(ab); if (ret) { ath12k_err(ab, "failed to init core: %d\n", ret); @@ -1306,6 +1703,9 @@ err_mhi_unregister: err_pci_msi_free: ath12k_pci_msi_free(ab_pci); +err_irq_affinity_cleanup: + ath12k_pci_set_irq_affinity_hint(ab_pci, NULL); + err_pci_free_region: ath12k_pci_free_region(ab_pci); @@ -1320,16 +1720,21 @@ static void ath12k_pci_remove(struct pci_dev *pdev) struct ath12k_base *ab = pci_get_drvdata(pdev); struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); + ath12k_pci_set_irq_affinity_hint(ab_pci, NULL); + if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) { - ath12k_pci_power_down(ab); + ath12k_pci_power_down(ab, false); ath12k_qmi_deinit_service(ab); + ath12k_core_hw_group_unassign(ab); goto qmi_fail; } set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags); cancel_work_sync(&ab->reset_work); + cancel_work_sync(&ab->dump_work); ath12k_core_deinit(ab); + ath12k_fw_unmap(ab); qmi_fail: ath12k_mhi_unregister(ab_pci); @@ -1346,8 +1751,10 @@ qmi_fail: static void ath12k_pci_shutdown(struct pci_dev *pdev) { struct ath12k_base *ab = pci_get_drvdata(pdev); + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); - ath12k_pci_power_down(ab); + ath12k_pci_set_irq_affinity_hint(ab_pci, NULL); + ath12k_pci_power_down(ab, false); } static __maybe_unused int ath12k_pci_pm_suspend(struct device *dev) @@ -1374,9 +1781,36 @@ static __maybe_unused int ath12k_pci_pm_resume(struct device *dev) return ret; } -static SIMPLE_DEV_PM_OPS(ath12k_pci_pm_ops, - ath12k_pci_pm_suspend, - ath12k_pci_pm_resume); +static __maybe_unused int ath12k_pci_pm_suspend_late(struct device *dev) +{ + struct ath12k_base *ab = dev_get_drvdata(dev); + int ret; + + ret = ath12k_core_suspend_late(ab); + if (ret) + ath12k_warn(ab, "failed to late suspend core: %d\n", ret); + + return ret; +} + +static __maybe_unused int ath12k_pci_pm_resume_early(struct device *dev) +{ + struct ath12k_base *ab = dev_get_drvdata(dev); + int ret; + + ret = ath12k_core_resume_early(ab); + if (ret) + ath12k_warn(ab, "failed to early resume core: %d\n", ret); + + return ret; +} + +static const struct dev_pm_ops __maybe_unused ath12k_pci_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ath12k_pci_pm_suspend, + ath12k_pci_pm_resume) + SET_LATE_SYSTEM_SLEEP_PM_OPS(ath12k_pci_pm_suspend_late, + ath12k_pci_pm_resume_early) +}; static struct pci_driver ath12k_pci_driver = { .name = "ath12k_pci", @@ -1409,5 +1843,5 @@ static void ath12k_pci_exit(void) module_exit(ath12k_pci_exit); -MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11be WLAN PCIe devices"); +MODULE_DESCRIPTION("Driver support for Qualcomm Technologies PCIe 802.11be WLAN devices"); MODULE_LICENSE("Dual BSD/GPL"); @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef ATH12K_PCI_H #define ATH12K_PCI_H @@ -53,6 +53,9 @@ #define WLAON_QFPROM_PWR_CTRL_REG 0x01f8031c #define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4 +#define QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB 0x1E20338 +#define OTP_BOARD_ID_MASK GENMASK(15, 0) + #define PCI_BAR_WINDOW0_BASE 0x1E00000 #define PCI_BAR_WINDOW0_END 0x1E7FFFC #define PCI_SOC_RANGE_MASK 0x3FFF @@ -84,6 +87,7 @@ enum ath12k_pci_flags { ATH12K_PCI_FLAG_INIT_DONE, ATH12K_PCI_FLAG_IS_MSI_64, ATH12K_PCI_ASPM_RESTORE, + ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, }; struct ath12k_pci_ops { @@ -100,6 +104,7 @@ struct ath12k_pci { struct mhi_controller *mhi_ctrl; const struct ath12k_msi_config *msi_config; unsigned long mhi_state; + enum mhi_callback mhi_pre_cb; u32 register_window; /* protects register_window above */ @@ -108,7 +113,9 @@ struct ath12k_pci { /* enum ath12k_pci_flags */ unsigned long flags; u16 link_ctl; + unsigned long irq_flags; const struct ath12k_pci_ops *pci_ops; + u32 qmi_instance; }; static inline struct ath12k_pci *ath12k_pci_priv(struct ath12k_base *ab) @@ -137,5 +144,5 @@ int ath12k_pci_hif_resume(struct ath12k_base *ab); void ath12k_pci_stop(struct ath12k_base *ab); int ath12k_pci_start(struct ath12k_base *ab); int ath12k_pci_power_up(struct ath12k_base *ab); -void ath12k_pci_power_down(struct ath12k_base *ab); +void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend); #endif /* ATH12K_PCI_H */ @@ -1,13 +1,29 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include "core.h" #include "peer.h" #include "debug.h" +static struct ath12k_ml_peer *ath12k_peer_ml_find(struct ath12k_hw *ah, const u8 *addr) +{ + struct ath12k_ml_peer *ml_peer; + + lockdep_assert_wiphy(ah->hw->wiphy); + + list_for_each_entry(ml_peer, &ah->ml_peers, list) { + if (!ether_addr_equal(ml_peer->addr, addr)) + continue; + + return ml_peer; + } + + return NULL; +} + struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id, const u8 *addr) { @@ -63,6 +79,20 @@ struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab, return NULL; } +static struct ath12k_peer *ath12k_peer_find_by_ml_id(struct ath12k_base *ab, + int ml_peer_id) +{ + struct ath12k_peer *peer; + + lockdep_assert_held(&ab->base_lock); + + list_for_each_entry(peer, &ab->peers, list) + if (ml_peer_id == peer->ml_id) + return peer; + + return NULL; +} + struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab, int peer_id) { @@ -70,6 +100,9 @@ struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab, lockdep_assert_held(&ab->base_lock); + if (peer_id & ATH12K_PEER_ML_ID_VALID) + return ath12k_peer_find_by_ml_id(ab, peer_id); + list_for_each_entry(peer, &ab->peers, list) if (peer_id == peer->peer_id) return peer; @@ -186,7 +219,7 @@ void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id) struct ath12k_peer *peer, *tmp; struct ath12k_base *ab = ar->ab; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); spin_lock_bh(&ab->base_lock); list_for_each_entry_safe(peer, tmp, &ab->peers, list) { @@ -231,22 +264,36 @@ int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id, return 0; } -int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr) +static int ath12k_peer_delete_send(struct ath12k *ar, u32 vdev_id, const u8 *addr) { + struct 
ath12k_base *ab = ar->ab; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); reinit_completion(&ar->peer_delete_done); ret = ath12k_wmi_send_peer_delete_cmd(ar, addr, vdev_id); if (ret) { - ath12k_warn(ar->ab, + ath12k_warn(ab, "failed to delete peer vdev_id %d addr %pM ret %d\n", vdev_id, addr, ret); return ret; } + return 0; +} + +int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr) +{ + int ret; + + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + ret = ath12k_peer_delete_send(ar, vdev_id, addr); + if (ret) + return ret; + ret = ath12k_wait_for_peer_delete_done(ar, vdev_id, addr); if (ret) return ret; @@ -261,14 +308,19 @@ static int ath12k_wait_for_peer_created(struct ath12k *ar, int vdev_id, const u8 return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, true); } -int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif, +int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif, struct ieee80211_sta *sta, struct ath12k_wmi_peer_create_arg *arg) { + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ath12k_link_sta *arsta; + u8 link_id = arvif->link_id; struct ath12k_peer *peer; + struct ath12k_sta *ahsta; + u16 ml_peer_id; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); if (ar->num_peers > (ar->max_num_peers - 1)) { ath12k_warn(ar->ab, @@ -326,11 +378,34 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif, peer->pdev_idx = ar->pdev_idx; peer->sta = sta; - if (arvif->vif->type == NL80211_IFTYPE_STATION) { + if (vif->type == NL80211_IFTYPE_STATION) { arvif->ast_hash = peer->ast_hash; arvif->ast_idx = peer->hw_peer_id; } + if (sta) { + ahsta = ath12k_sta_to_ahsta(sta); + arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy, + ahsta->link[link_id]); + + peer->link_id = arsta->link_id; + + /* Fill ML info into created peer */ + if (sta->mlo) { + ml_peer_id = ahsta->ml_peer_id; + peer->ml_id = ml_peer_id | ATH12K_PEER_ML_ID_VALID; + ether_addr_copy(peer->ml_addr, sta->addr); + + /* the assoc link is considered primary for now */ + peer->primary_link = arsta->is_assoc_link; + peer->mlo = true; + } else { + peer->ml_id = ATH12K_MLO_PEER_ID_INVALID; + peer->primary_link = true; + peer->mlo = false; + } + } + peer->sec_type = HAL_ENCRYPT_TYPE_OPEN; peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN; @@ -340,3 +415,150 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif, return 0; } + +static u16 ath12k_peer_ml_alloc(struct ath12k_hw *ah) +{ + u16 ml_peer_id; + + lockdep_assert_wiphy(ah->hw->wiphy); + + for (ml_peer_id = 0; ml_peer_id < ATH12K_MAX_MLO_PEERS; ml_peer_id++) { + if (test_bit(ml_peer_id, ah->free_ml_peer_id_map)) + continue; + + set_bit(ml_peer_id, ah->free_ml_peer_id_map); + break; + } + + if (ml_peer_id == ATH12K_MAX_MLO_PEERS) + ml_peer_id = ATH12K_MLO_PEER_ID_INVALID; + + return ml_peer_id; +} + +int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta) +{ + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); + struct ath12k_ml_peer *ml_peer; + + lockdep_assert_wiphy(ah->hw->wiphy); + + if (!sta->mlo) + return -EINVAL; + + ml_peer = ath12k_peer_ml_find(ah, sta->addr); + if (ml_peer) { + ath12k_hw_warn(ah, "ML peer %d exists already, unable to add new entry for %pM", + ml_peer->id, sta->addr); + return -EEXIST; + } + + ml_peer = kzalloc(sizeof(*ml_peer), GFP_ATOMIC); + if (!ml_peer) + return -ENOMEM; + + ahsta->ml_peer_id = ath12k_peer_ml_alloc(ah); + + if 
(ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) { + ath12k_hw_warn(ah, "unable to allocate ML peer id for sta %pM", + sta->addr); + kfree(ml_peer); + return -ENOMEM; + } + + ether_addr_copy(ml_peer->addr, sta->addr); + ml_peer->id = ahsta->ml_peer_id; + list_add(&ml_peer->list, &ah->ml_peers); + + return 0; +} + +int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta) +{ + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); + struct ath12k_ml_peer *ml_peer; + + lockdep_assert_wiphy(ah->hw->wiphy); + + if (!sta->mlo) + return -EINVAL; + + clear_bit(ahsta->ml_peer_id, ah->free_ml_peer_id_map); + ahsta->ml_peer_id = ATH12K_MLO_PEER_ID_INVALID; + + ml_peer = ath12k_peer_ml_find(ah, sta->addr); + if (!ml_peer) { + ath12k_hw_warn(ah, "ML peer for %pM not found", sta->addr); + return -EINVAL; + } + + list_del(&ml_peer->list); + kfree(ml_peer); + + return 0; +} + +int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta) +{ + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta); + struct ath12k_hw *ah = ahvif->ah; + struct ath12k_link_vif *arvif; + struct ath12k_link_sta *arsta; + unsigned long links; + struct ath12k *ar; + int ret, err_ret = 0; + u8 link_id; + + lockdep_assert_wiphy(ah->hw->wiphy); + + if (!sta->mlo) + return -EINVAL; + + /* FW expects delete of all link peers at once before waiting for reception + * of peer unmap or delete responses + */ + links = ahsta->links_map; + for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { + arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]); + arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]); + if (!arvif || !arsta) + continue; + + ar = arvif->ar; + if (!ar) + continue; + + ath12k_dp_peer_cleanup(ar, arvif->vdev_id, arsta->addr); + + ret = ath12k_peer_delete_send(ar, arvif->vdev_id, arsta->addr); + if (ret) { + ath12k_warn(ar->ab, + "failed to delete peer vdev_id %d addr %pM ret %d\n", + arvif->vdev_id, arsta->addr, ret); + err_ret = ret; + continue; + } + } + + /* Ensure all link peers are deleted and unmapped */ + links = ahsta->links_map; + for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { + arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]); + arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]); + if (!arvif || !arsta) + continue; + + ar = arvif->ar; + if (!ar) + continue; + + ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id, arsta->addr); + if (ret) { + err_ret = ret; + continue; + } + ar->num_peers--; + } + + return err_ret; +} @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH12K_PEER_H @@ -19,6 +19,8 @@ struct ppdu_user_delayba { u32 resp_rate_flags; }; +#define ATH12K_PEER_ML_ID_VALID BIT(13) + struct ath12k_peer { struct list_head list; struct ieee80211_sta *sta; @@ -44,6 +46,28 @@ struct ath12k_peer { struct ppdu_user_delayba ppdu_stats_delayba; bool delayba_flag; bool is_authorized; + bool mlo; + /* protected by ab->data_lock */ + bool dp_setup_done; + + u16 ml_id; + + /* any other ML info common for all partners can be added + * here and would be same for all partner peers. 
+ */ + u8 ml_addr[ETH_ALEN]; + + /* To ensure only certain work related to dp is done once */ + bool primary_link; + + /* for reference to ath12k_link_sta */ + u8 link_id; +}; + +struct ath12k_ml_peer { + struct list_head list; + u8 addr[ETH_ALEN]; + u16 id; }; void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id); @@ -56,12 +80,15 @@ struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab, struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab, int peer_id); void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id); int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr); -int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif, +int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif, struct ieee80211_sta *sta, struct ath12k_wmi_peer_create_arg *arg); int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id, const u8 *addr); bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id); struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab, int ast_hash); +int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta); +int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta); +int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta); #endif /* _PEER_H_ */ @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <linux/elf.h> @@ -17,7 +17,7 @@ #define PLATFORM_CAP_PCIE_GLOBAL_RESET 0x08 #define ATH12K_QMI_MAX_CHUNK_SIZE 2097152 -static struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = { +static const struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, @@ -61,7 +61,7 @@ static struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, @@ -511,7 +511,7 @@ static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -528,7 +528,86 @@ static struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_phy_cap_req_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static const struct qmi_elem_info qmi_wlanfw_phy_cap_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01, resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01, + num_phy_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + 
.offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01, + num_phy), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01, + board_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01, + board_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01, + single_chip_mlo_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01, + single_chip_mlo_support), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static const struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, @@ -753,7 +832,7 @@ static struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -789,7 +868,7 @@ static struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, @@ -821,7 +900,7 @@ static struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -863,7 +942,7 @@ static struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = { { .data_type = QMI_DATA_LEN, .elem_len = 1, @@ -890,7 +969,7 @@ static struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, @@ -930,7 +1009,7 @@ static struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = { { .data_type = QMI_DATA_LEN, .elem_len = 1, @@ -957,7 +1036,7 @@ static struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -975,7 +1054,7 @@ static struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .array_type = NO_ARRAY, @@ -983,7 +1062,7 @@ static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = { }, }; 
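Each qmi_wlanfw_*_ei[] array in this file is a TLV schema for the QMI wire format: the QMI core walks it to encode a C struct into TLVs and to decode a response back into a struct. Optional fields come in pairs that share one tlv_type, a QMI_OPT_FLAG entry that records presence in a *_valid member plus the value entry itself, which is why callers such as the PHY capability handler check num_phy_valid before using num_phy. A minimal sketch of that pattern follows; the types and macros from <linux/soc/qcom/qmi.h> are real, but the message itself is invented for illustration.

	#include <linux/soc/qcom/qmi.h>

	/* Hypothetical response carrying one optional u8 field */
	struct example_resp_msg_v01 {
		struct qmi_response_type_v01 resp;
		u8 num_phy_valid;	/* set by the decoder if TLV 0x10 is present */
		u8 num_phy;
	};

	static const struct qmi_elem_info example_resp_msg_v01_ei[] = {
		{
			.data_type = QMI_STRUCT,
			.elem_len = 1,
			.elem_size = sizeof(struct qmi_response_type_v01),
			.array_type = NO_ARRAY,
			.tlv_type = 0x02,
			.offset = offsetof(struct example_resp_msg_v01, resp),
			.ei_array = qmi_response_type_v01_ei,
		},
		{
			/* presence flag and value share tlv_type 0x10; the
			 * QMI_OPT_FLAG entry makes decode fill num_phy_valid
			 */
			.data_type = QMI_OPT_FLAG,
			.elem_len = 1,
			.elem_size = sizeof(u8),
			.array_type = NO_ARRAY,
			.tlv_type = 0x10,
			.offset = offsetof(struct example_resp_msg_v01, num_phy_valid),
		},
		{
			.data_type = QMI_UNSIGNED_1_BYTE,
			.elem_len = 1,
			.elem_size = sizeof(u8),
			.array_type = NO_ARRAY,
			.tlv_type = 0x10,
			.offset = offsetof(struct example_resp_msg_v01, num_phy),
		},
		{
			.data_type = QMI_EOTI,
			.array_type = NO_ARRAY,
			.tlv_type = QMI_COMMON_TLV_TYPE,
		},
	};

Making these tables const, as the hunks here do, is safe because the QMI core only ever reads them; it also lets the linker place them in read-only data.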
-static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -1009,7 +1088,7 @@ static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -1026,7 +1105,7 @@ static struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -1042,7 +1121,7 @@ static struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_dev_mem_info_s_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_dev_mem_info_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, @@ -1068,7 +1147,7 @@ static struct qmi_elem_info qmi_wlanfw_dev_mem_info_s_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -1094,7 +1173,7 @@ static struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -1348,7 +1427,7 @@ static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = { { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, @@ -1483,7 +1562,7 @@ static struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -1501,7 +1580,7 @@ static struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = { { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, @@ -1525,7 +1604,7 @@ static struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -1542,7 +1621,7 @@ static struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -1595,7 +1674,7 @@ static struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -1630,7 +1709,7 @@ static struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] 
= { +static const struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_2_BYTE, .elem_len = 1, @@ -1654,7 +1733,7 @@ static struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -1671,7 +1750,7 @@ static struct qmi_elem_info qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -1706,7 +1785,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -1724,7 +1803,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, @@ -1862,7 +1941,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -1879,54 +1958,210 @@ static struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = { }, }; -static struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .array_type = NO_ARRAY, }, }; -static struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = { +static const struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .array_type = NO_ARRAY, }, }; -static void ath12k_host_cap_parse_mlo(struct qmi_wlanfw_host_cap_req_msg_v01 *req) +static const struct qmi_elem_info qmi_wlanfw_wlan_ini_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01, + enable_fwlog_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01, + enable_fwlog), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static const struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_wlanfw_wlan_ini_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static void ath12k_host_cap_hw_link_id_init(struct ath12k_hw_group *ag) { + struct ath12k_base *ab, *partner_ab; + int i, j, hw_id_base; + + for (i = 0; i < ag->num_devices; i++) { + hw_id_base = 0; + ab = ag->ab[i]; + + for (j = 0; j < ag->num_devices; j++) { + partner_ab = ag->ab[j]; + + if (partner_ab->wsi_info.index >= 
ab->wsi_info.index) + continue; + + hw_id_base += partner_ab->qmi.num_radios; + } + + ab->wsi_info.hw_link_id_base = hw_id_base; + } + + ag->hw_link_id_init_done = true; +} + +static int ath12k_host_cap_parse_mlo(struct ath12k_base *ab, + struct qmi_wlanfw_host_cap_req_msg_v01 *req) +{ + struct wlfw_host_mlo_chip_info_s_v01 *info; + struct ath12k_hw_group *ag = ab->ag; + struct ath12k_base *partner_ab; + u8 hw_link_id = 0; + int i, j, ret; + + if (!ag->mlo_capable) { + ath12k_dbg(ab, ATH12K_DBG_QMI, + "MLO is disabled hence skip QMI MLO cap"); + return 0; + } + + if (!ab->qmi.num_radios || ab->qmi.num_radios == U8_MAX) { + ab->single_chip_mlo_supp = false; + + ath12k_dbg(ab, ATH12K_DBG_QMI, + "skip QMI MLO cap due to invalid num_radio %d\n", + ab->qmi.num_radios); + return 0; + } + + if (ab->device_id == ATH12K_INVALID_DEVICE_ID) { + ath12k_err(ab, "failed to send MLO cap due to invalid device id\n"); + return -EINVAL; + } + req->mlo_capable_valid = 1; req->mlo_capable = 1; req->mlo_chip_id_valid = 1; - req->mlo_chip_id = 0; + req->mlo_chip_id = ab->device_id; req->mlo_group_id_valid = 1; - req->mlo_group_id = 0; + req->mlo_group_id = ag->id; req->max_mlo_peer_valid = 1; /* Max peer number generally won't change for the same device * but needs to be synced with host driver. */ - req->max_mlo_peer = 32; + req->max_mlo_peer = ab->hw_params->max_mlo_peer; req->mlo_num_chips_valid = 1; - req->mlo_num_chips = 1; + req->mlo_num_chips = ag->num_devices; + + ath12k_dbg(ab, ATH12K_DBG_QMI, "mlo capability advertisement device_id %d group_id %d num_devices %d", + req->mlo_chip_id, req->mlo_group_id, req->mlo_num_chips); + + mutex_lock(&ag->mutex); + + if (!ag->hw_link_id_init_done) + ath12k_host_cap_hw_link_id_init(ag); + + for (i = 0; i < ag->num_devices; i++) { + info = &req->mlo_chip_info[i]; + partner_ab = ag->ab[i]; + + if (partner_ab->device_id == ATH12K_INVALID_DEVICE_ID) { + ath12k_err(ab, "failed to send MLO cap due to invalid partner device id\n"); + ret = -EINVAL; + goto device_cleanup; + } + + info->chip_id = partner_ab->device_id; + info->num_local_links = partner_ab->qmi.num_radios; + + ath12k_dbg(ab, ATH12K_DBG_QMI, "mlo device id %d num_link %d\n", + info->chip_id, info->num_local_links); + + for (j = 0; j < info->num_local_links; j++) { + info->hw_link_id[j] = partner_ab->wsi_info.hw_link_id_base + j; + info->valid_mlo_link_id[j] = 1; + + ath12k_dbg(ab, ATH12K_DBG_QMI, "mlo hw_link_id %d\n", + info->hw_link_id[j]); + + hw_link_id++; + } + } + + if (hw_link_id <= 0) + ag->mlo_capable = false; + req->mlo_chip_info_valid = 1; - req->mlo_chip_info[0].chip_id = 0; - req->mlo_chip_info[0].num_local_links = 2; - req->mlo_chip_info[0].hw_link_id[0] = 0; - req->mlo_chip_info[0].hw_link_id[1] = 1; - req->mlo_chip_info[0].valid_mlo_link_id[0] = 1; - req->mlo_chip_info[0].valid_mlo_link_id[1] = 1; + + mutex_unlock(&ag->mutex); + + return 0; + +device_cleanup: + for (i = i - 1; i >= 0; i--) { + info = &req->mlo_chip_info[i]; + + memset(info, 0, sizeof(*info)); + } + + req->mlo_num_chips = 0; + req->mlo_num_chips_valid = 0; + + req->max_mlo_peer = 0; + req->max_mlo_peer_valid = 0; + req->mlo_group_id = 0; + req->mlo_group_id_valid = 0; + req->mlo_chip_id = 0; + req->mlo_chip_id_valid = 0; + req->mlo_capable = 0; + req->mlo_capable_valid = 0; + + ag->mlo_capable = false; + + mutex_unlock(&ag->mutex); + + return ret; } -static int ath12k_qmi_host_cap_send(struct ath12k_base *ab) +/* clang stack usage explodes if this is inlined */ +static noinline_for_stack +int ath12k_qmi_host_cap_send(struct 
ath12k_base *ab) { - struct qmi_wlanfw_host_cap_req_msg_v01 req; - struct qmi_wlanfw_host_cap_resp_msg_v01 resp; - struct qmi_txn txn = {}; + struct qmi_wlanfw_host_cap_req_msg_v01 req = {}; + struct qmi_wlanfw_host_cap_resp_msg_v01 resp = {}; + struct qmi_txn txn; int ret = 0; - memset(&req, 0, sizeof(req)); - memset(&resp, 0, sizeof(resp)); - req.num_clients_valid = 1; req.num_clients = 1; req.mem_cfg_mode = ab->qmi.target_mem_mode; @@ -1963,10 +2198,12 @@ static int ath12k_qmi_host_cap_send(struct ath12k_base *ab) */ req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT; req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET; - - ath12k_host_cap_parse_mlo(&req); } + ret = ath12k_host_cap_parse_mlo(ab, &req); + if (ret < 0) + goto out; + ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp); if (ret < 0) @@ -1977,6 +2214,7 @@ static int ath12k_qmi_host_cap_send(struct ath12k_base *ab) QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_host_cap_req_msg_v01_ei, &req); if (ret < 0) { + qmi_txn_cancel(&txn); ath12k_warn(ab, "Failed to send host capability request,err = %d\n", ret); goto out; } @@ -1996,6 +2234,65 @@ out: return ret; } +static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab) +{ + struct qmi_wlanfw_phy_cap_req_msg_v01 req = {}; + struct qmi_wlanfw_phy_cap_resp_msg_v01 resp = {}; + struct qmi_txn txn; + int ret; + + ret = qmi_txn_init(&ab->qmi.handle, &txn, + qmi_wlanfw_phy_cap_resp_msg_v01_ei, &resp); + if (ret < 0) + goto out; + + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, + QMI_WLANFW_PHY_CAP_REQ_V01, + QMI_WLANFW_PHY_CAP_REQ_MSG_V01_MAX_LEN, + qmi_wlanfw_phy_cap_req_msg_v01_ei, &req); + if (ret < 0) { + qmi_txn_cancel(&txn); + ath12k_warn(ab, "failed to send phy capability request: %d\n", ret); + goto out; + } + + ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS)); + if (ret < 0) + goto out; + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + ret = -EOPNOTSUPP; + goto out; + } + + if (resp.single_chip_mlo_support_valid && + resp.single_chip_mlo_support) + ab->single_chip_mlo_supp = true; + + if (!resp.num_phy_valid) { + ret = -ENODATA; + goto out; + } + + ab->qmi.num_radios = resp.num_phy; + + ath12k_dbg(ab, ATH12K_DBG_QMI, + "phy capability resp valid %d num_phy %d valid %d board_id %d valid %d single_chip_mlo_support %d\n", + resp.num_phy_valid, resp.num_phy, + resp.board_id_valid, resp.board_id, + resp.single_chip_mlo_support_valid, resp.single_chip_mlo_support); + + return; + +out: + /* If PHY capability not advertised then rely on default num link */ + ab->qmi.num_radios = ab->hw_params->def_num_link; + + ath12k_dbg(ab, ATH12K_DBG_QMI, + "no valid response from PHY capability, choose default num_phy %d\n", + ab->qmi.num_radios); +} + static int ath12k_qmi_fw_ind_register_send(struct ath12k_base *ab) { struct qmi_wlanfw_ind_register_req_msg_v01 *req; @@ -2040,6 +2337,7 @@ static int ath12k_qmi_fw_ind_register_send(struct ath12k_base *ab) QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_ind_register_req_msg_v01_ei, req); if (ret < 0) { + qmi_txn_cancel(&txn); ath12k_warn(ab, "Failed to send indication register request, err = %d\n", ret); goto out; @@ -2065,11 +2363,13 @@ resp_out: return ret; } -static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab) +/* clang stack usage explodes if this is inlined */ +static noinline_for_stack +int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab) { struct qmi_wlanfw_respond_mem_req_msg_v01 *req; - struct qmi_wlanfw_respond_mem_resp_msg_v01 resp; - 
struct qmi_txn txn = {}; + struct qmi_wlanfw_respond_mem_resp_msg_v01 resp = {}; + struct qmi_txn txn; int ret = 0, i; bool delayed; @@ -2077,8 +2377,6 @@ static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab) if (!req) return -ENOMEM; - memset(&resp, 0, sizeof(resp)); - /* Some targets by default request a block of big contiguous * DMA memory, it's hard to allocate from kernel. So host returns * failure to firmware and firmware then request multiple blocks of @@ -2088,7 +2386,6 @@ static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab) delayed = true; ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi delays mem_request %d\n", ab->qmi.mem_seg_count); - memset(req, 0, sizeof(*req)); } else { delayed = false; req->mem_seg_len = ab->qmi.mem_seg_count; @@ -2114,6 +2411,7 @@ static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab) QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_respond_mem_req_msg_v01_ei, req); if (ret < 0) { + qmi_txn_cancel(&txn); ath12k_warn(ab, "qmi failed to respond memory request, err = %d\n", ret); goto out; @@ -2142,29 +2440,125 @@ out: return ret; } +static void ath12k_qmi_free_mlo_mem_chunk(struct ath12k_base *ab, + struct target_mem_chunk *chunk, + int idx) +{ + struct ath12k_hw_group *ag = ab->ag; + struct target_mem_chunk *mlo_chunk; + + lockdep_assert_held(&ag->mutex); + + if (!ag->mlo_mem.init_done || ag->num_started) + return; + + if (idx >= ARRAY_SIZE(ag->mlo_mem.chunk)) { + ath12k_warn(ab, "invalid index for MLO memory chunk free: %d\n", idx); + return; + } + + mlo_chunk = &ag->mlo_mem.chunk[idx]; + if (mlo_chunk->v.addr) { + dma_free_coherent(ab->dev, + mlo_chunk->size, + mlo_chunk->v.addr, + mlo_chunk->paddr); + mlo_chunk->v.addr = NULL; + } + + mlo_chunk->paddr = 0; + mlo_chunk->size = 0; + chunk->v.addr = NULL; + chunk->paddr = 0; + chunk->size = 0; +} + static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab) { - int i; + struct ath12k_hw_group *ag = ab->ag; + int i, mlo_idx; - for (i = 0; i < ab->qmi.mem_seg_count; i++) { + for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) { if (!ab->qmi.target_mem[i].v.addr) continue; - dma_free_coherent(ab->dev, - ab->qmi.target_mem[i].size, - ab->qmi.target_mem[i].v.addr, - ab->qmi.target_mem[i].paddr); - ab->qmi.target_mem[i].v.addr = NULL; + + if (ab->qmi.target_mem[i].type == MLO_GLOBAL_MEM_REGION_TYPE) { + ath12k_qmi_free_mlo_mem_chunk(ab, + &ab->qmi.target_mem[i], + mlo_idx++); + } else { + dma_free_coherent(ab->dev, + ab->qmi.target_mem[i].prev_size, + ab->qmi.target_mem[i].v.addr, + ab->qmi.target_mem[i].paddr); + ab->qmi.target_mem[i].v.addr = NULL; + } + } + + if (!ag->num_started && ag->mlo_mem.init_done) { + ag->mlo_mem.init_done = false; + ag->mlo_mem.mlo_mem_size = 0; } } +static int ath12k_qmi_alloc_chunk(struct ath12k_base *ab, + struct target_mem_chunk *chunk) +{ + /* Firmware reloads in recovery/resume. + * In such cases, no need to allocate memory for FW again. 
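The comment above captures the whole reuse rule: a chunk left over from a previous boot is kept only when both its type and size still match the new request. A minimal userspace sketch of that decision follows, with plain malloc()/free() standing in for dma_alloc_coherent()/dma_free_coherent(); the chunk_sim and chunk_alloc_sim names are invented for illustration, and the real driver additionally splits the failure path into the -EAGAIN retry for oversized chunks shown in the hunk.

#include <stdlib.h>

/* Simplified stand-in for struct target_mem_chunk. */
struct chunk_sim {
	unsigned int type, size;           /* current firmware request */
	unsigned int prev_type, prev_size; /* what the live buffer was allocated as */
	void *addr;                        /* stands in for chunk->v.addr */
};

/* Mirrors the shape of ath12k_qmi_alloc_chunk(): reuse the buffer kept
 * across recovery/resume only if type and size are unchanged, otherwise
 * free it and allocate fresh, remembering what was allocated.
 */
static int chunk_alloc_sim(struct chunk_sim *c)
{
	if (c->addr) {
		if (c->prev_type == c->type && c->prev_size == c->size)
			return 0;              /* reuse as-is */

		free(c->addr);                 /* cannot reuse the existing chunk */
		c->addr = NULL;
	}

	c->addr = malloc(c->size);
	if (!c->addr)
		return -1;                     /* driver distinguishes -EAGAIN/-ENOMEM here */

	c->prev_type = c->type;
	c->prev_size = c->size;
	return 0;
}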
+ */ + if (chunk->v.addr) { + if (chunk->prev_type == chunk->type && + chunk->prev_size == chunk->size) + goto this_chunk_done; + + /* cannot reuse the existing chunk */ + dma_free_coherent(ab->dev, chunk->prev_size, + chunk->v.addr, chunk->paddr); + chunk->v.addr = NULL; + } + + chunk->v.addr = dma_alloc_coherent(ab->dev, + chunk->size, + &chunk->paddr, + GFP_KERNEL | __GFP_NOWARN); + if (!chunk->v.addr) { + if (chunk->size > ATH12K_QMI_MAX_CHUNK_SIZE) { + ab->qmi.target_mem_delayed = true; + ath12k_warn(ab, + "qmi dma allocation failed (%d B type %u), will try later with small size\n", + chunk->size, + chunk->type); + ath12k_qmi_free_target_mem_chunk(ab); + return -EAGAIN; + } + ath12k_warn(ab, "memory allocation failure for %u size: %d\n", + chunk->type, chunk->size); + return -ENOMEM; + } + chunk->prev_type = chunk->type; + chunk->prev_size = chunk->size; +this_chunk_done: + return 0; +} + static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab) { - int i; - struct target_mem_chunk *chunk; + struct target_mem_chunk *chunk, *mlo_chunk; + struct ath12k_hw_group *ag = ab->ag; + int i, mlo_idx, ret; + int mlo_size = 0; + + mutex_lock(&ag->mutex); + + if (!ag->mlo_mem.init_done) { + memset(ag->mlo_mem.chunk, 0, sizeof(ag->mlo_mem.chunk)); + ag->mlo_mem.init_done = true; + } ab->qmi.target_mem_delayed = false; - for (i = 0; i < ab->qmi.mem_seg_count; i++) { + for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) { chunk = &ab->qmi.target_mem[i]; /* Allocate memory for the region and the functionality supported @@ -2176,24 +2570,41 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab) case M3_DUMP_REGION_TYPE: case PAGEABLE_MEM_REGION_TYPE: case CALDB_MEM_REGION_TYPE: - chunk->v.addr = dma_alloc_coherent(ab->dev, - chunk->size, - &chunk->paddr, - GFP_KERNEL | __GFP_NOWARN); - if (!chunk->v.addr) { - if (chunk->size > ATH12K_QMI_MAX_CHUNK_SIZE) { - ab->qmi.target_mem_delayed = true; - ath12k_warn(ab, - "qmi dma allocation failed (%d B type %u), will try later with small size\n", - chunk->size, - chunk->type); - ath12k_qmi_free_target_mem_chunk(ab); - return 0; + ret = ath12k_qmi_alloc_chunk(ab, chunk); + if (ret) + goto err; + break; + case MLO_GLOBAL_MEM_REGION_TYPE: + mlo_size += chunk->size; + if (ag->mlo_mem.mlo_mem_size && + mlo_size > ag->mlo_mem.mlo_mem_size) { + ath12k_err(ab, "QMI MLO memory allocation failure, requested size %d is more than allocated size %d", + mlo_size, ag->mlo_mem.mlo_mem_size); + ret = -EINVAL; + goto err; + } + + mlo_chunk = &ag->mlo_mem.chunk[mlo_idx]; + if (mlo_chunk->paddr) { + if (chunk->size != mlo_chunk->size) { + ath12k_err(ab, "QMI MLO chunk memory allocation failure for index %d, requested size %d is more than allocated size %d", + mlo_idx, chunk->size, mlo_chunk->size); + ret = -EINVAL; + goto err; } - ath12k_warn(ab, "memory allocation failure for %u size: %d\n", - chunk->type, chunk->size); - return -ENOMEM; + } else { + mlo_chunk->size = chunk->size; + mlo_chunk->type = chunk->type; + ret = ath12k_qmi_alloc_chunk(ab, mlo_chunk); + if (ret) + goto err; + memset(mlo_chunk->v.addr, 0, mlo_chunk->size); } + + chunk->paddr = mlo_chunk->paddr; + chunk->v.addr = mlo_chunk->v.addr; + mlo_idx++; + break; default: ath12k_warn(ab, "memory type %u not supported\n", @@ -2203,21 +2614,48 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab) break; } } + + if (!ag->mlo_mem.mlo_mem_size) { + ag->mlo_mem.mlo_mem_size = mlo_size; + } else if (ag->mlo_mem.mlo_mem_size != mlo_size) { + ath12k_err(ab, "QMI MLO memory 
size error, expected size is %d but requested size is %d", + ag->mlo_mem.mlo_mem_size, mlo_size); + ret = -EINVAL; + goto err; + } + + mutex_unlock(&ag->mutex); + return 0; + +err: + ath12k_qmi_free_target_mem_chunk(ab); + + mutex_unlock(&ag->mutex); + + /* The firmware will attempt to request memory in smaller chunks + * on the next try. However, the current caller should be notified + * that this instance of request parsing was successful. + * Therefore, return 0 only. + */ + if (ret == -EAGAIN) + ret = 0; + + return ret; } -static int ath12k_qmi_request_target_cap(struct ath12k_base *ab) +/* clang stack usage explodes if this is inlined */ +static noinline_for_stack +int ath12k_qmi_request_target_cap(struct ath12k_base *ab) { - struct qmi_wlanfw_cap_req_msg_v01 req; - struct qmi_wlanfw_cap_resp_msg_v01 resp; - struct qmi_txn txn = {}; + struct qmi_wlanfw_cap_req_msg_v01 req = {}; + struct qmi_wlanfw_cap_resp_msg_v01 resp = {}; + struct qmi_txn txn; unsigned int board_id = ATH12K_BOARD_ID_DEFAULT; int ret = 0; + int r; int i; - memset(&req, 0, sizeof(req)); - memset(&resp, 0, sizeof(resp)); - ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_cap_resp_msg_v01_ei, &resp); if (ret < 0) @@ -2228,6 +2666,7 @@ static int ath12k_qmi_request_target_cap(struct ath12k_base *ab) QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_cap_req_msg_v01_ei, &req); if (ret < 0) { + qmi_txn_cancel(&txn); ath12k_warn(ab, "qmi failed to send target cap request, err = %d\n", ret); goto out; @@ -2277,7 +2716,7 @@ static int ath12k_qmi_request_target_cap(struct ath12k_base *ab) ab->qmi.dev_mem[i].size = resp.dev_mem[i].size; ath12k_dbg(ab, ATH12K_DBG_QMI, - "devmem [%d] start ox%llx size %llu\n", i, + "devmem [%d] start 0x%llx size %llu\n", i, ab->qmi.dev_mem[i].start, ab->qmi.dev_mem[i].size); } @@ -2297,6 +2736,10 @@ static int ath12k_qmi_request_target_cap(struct ath12k_base *ab) ab->qmi.target.fw_build_timestamp, ab->qmi.target.fw_build_id); + r = ath12k_core_check_smbios(ab); + if (r) + ath12k_dbg(ab, ATH12K_DBG_QMI, "SMBIOS bdf variant name not set.\n"); + out: return ret; } @@ -2305,16 +2748,15 @@ static int ath12k_qmi_load_file_target_mem(struct ath12k_base *ab, const u8 *data, u32 len, u8 type) { struct qmi_wlanfw_bdf_download_req_msg_v01 *req; - struct qmi_wlanfw_bdf_download_resp_msg_v01 resp; - struct qmi_txn txn = {}; + struct qmi_wlanfw_bdf_download_resp_msg_v01 resp = {}; + struct qmi_txn txn; const u8 *temp = data; - int ret; + int ret = 0; u32 remaining = len; req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; - memset(&resp, 0, sizeof(resp)); while (remaining) { req->valid = 1; @@ -2390,8 +2832,10 @@ out: return ret; } -static int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab, - enum ath12k_qmi_bdf_type type) +/* clang stack usage explodes if this is inlined */ +static noinline_for_stack +int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab, + enum ath12k_qmi_bdf_type type) { struct device *dev = ab->dev; char filename[ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE]; @@ -2418,8 +2862,7 @@ static int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab, break; case ATH12K_QMI_BDF_TYPE_REGDB: - ret = ath12k_core_fetch_board_data_api_1(ab, &bd, - ATH12K_REGDB_FILE_NAME); + ret = ath12k_core_fetch_regdb(ab, &bd); if (ret) { ath12k_warn(ab, "qmi failed to load regdb bin:\n"); goto out; @@ -2489,65 +2932,90 @@ out: return ret; } +static void ath12k_qmi_m3_free(struct ath12k_base *ab) +{ + struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; + + if (!m3_mem->vaddr) + return; + + dma_free_coherent(ab->dev, 
m3_mem->size, + m3_mem->vaddr, m3_mem->paddr); + m3_mem->vaddr = NULL; + m3_mem->size = 0; +} + static int ath12k_qmi_m3_load(struct ath12k_base *ab) { struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; - const struct firmware *fw; + const struct firmware *fw = NULL; + const void *m3_data; char path[100]; + size_t m3_len; int ret; - if (m3_mem->vaddr || m3_mem->size) - return 0; + if (ab->fw.m3_data && ab->fw.m3_len > 0) { + /* firmware-N.bin had a m3 firmware file so use that */ + m3_data = ab->fw.m3_data; + m3_len = ab->fw.m3_len; + } else { + /* No m3 file in firmware-N.bin so try to request old + * separate m3.bin. + */ + fw = ath12k_core_firmware_request(ab, ATH12K_M3_FILE); + if (IS_ERR(fw)) { + ret = PTR_ERR(fw); + ath12k_core_create_firmware_path(ab, ATH12K_M3_FILE, + path, sizeof(path)); + ath12k_err(ab, "failed to load %s: %d\n", path, ret); + return ret; + } - fw = ath12k_core_firmware_request(ab, ATH12K_M3_FILE); - if (IS_ERR(fw)) { - ret = PTR_ERR(fw); - ath12k_core_create_firmware_path(ab, ATH12K_M3_FILE, - path, sizeof(path)); - ath12k_err(ab, "failed to load %s: %d\n", path, ret); - return ret; + m3_data = fw->data; + m3_len = fw->size; + } + + /* In recovery/resume cases, M3 buffer is not freed, try to reuse that */ + if (m3_mem->vaddr) { + if (m3_mem->size >= m3_len) + goto skip_m3_alloc; + + /* Old buffer is too small, free and reallocate */ + ath12k_qmi_m3_free(ab); } m3_mem->vaddr = dma_alloc_coherent(ab->dev, - fw->size, &m3_mem->paddr, + m3_len, &m3_mem->paddr, GFP_KERNEL); if (!m3_mem->vaddr) { ath12k_err(ab, "failed to allocate memory for M3 with size %zu\n", fw->size); - release_firmware(fw); - return -ENOMEM; + ret = -ENOMEM; + goto out; } - memcpy(m3_mem->vaddr, fw->data, fw->size); - m3_mem->size = fw->size; - release_firmware(fw); +skip_m3_alloc: + memcpy(m3_mem->vaddr, m3_data, m3_len); + m3_mem->size = m3_len; - return 0; -} - -static void ath12k_qmi_m3_free(struct ath12k_base *ab) -{ - struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; + ret = 0; - if (!m3_mem->vaddr) - return; +out: + release_firmware(fw); - dma_free_coherent(ab->dev, m3_mem->size, - m3_mem->vaddr, m3_mem->paddr); - m3_mem->vaddr = NULL; + return ret; } -static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab) +/* clang stack usage explodes if this is inlined */ +static noinline_for_stack +int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab) { struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; - struct qmi_wlanfw_m3_info_req_msg_v01 req; - struct qmi_wlanfw_m3_info_resp_msg_v01 resp; - struct qmi_txn txn = {}; + struct qmi_wlanfw_m3_info_req_msg_v01 req = {}; + struct qmi_wlanfw_m3_info_resp_msg_v01 resp = {}; + struct qmi_txn txn; int ret = 0; - memset(&req, 0, sizeof(req)); - memset(&resp, 0, sizeof(resp)); - ret = ath12k_qmi_m3_load(ab); if (ret) { ath12k_err(ab, "failed to load m3 firmware: %d", ret); @@ -2567,6 +3035,7 @@ static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab) QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN, qmi_wlanfw_m3_info_req_msg_v01_ei, &req); if (ret < 0) { + qmi_txn_cancel(&txn); ath12k_warn(ab, "qmi failed to send M3 information request, err = %d\n", ret); goto out; @@ -2591,14 +3060,11 @@ out: static int ath12k_qmi_wlanfw_mode_send(struct ath12k_base *ab, u32 mode) { - struct qmi_wlanfw_wlan_mode_req_msg_v01 req; - struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp; - struct qmi_txn txn = {}; + struct qmi_wlanfw_wlan_mode_req_msg_v01 req = {}; + struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp = {}; + struct qmi_txn txn; int ret = 0; - memset(&req, 0, 
sizeof(req)); - memset(&resp, 0, sizeof(resp)); - req.mode = mode; req.hw_debug_valid = 1; req.hw_debug = 0; @@ -2613,6 +3079,7 @@ static int ath12k_qmi_wlanfw_mode_send(struct ath12k_base *ab, QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req); if (ret < 0) { + qmi_txn_cancel(&txn); ath12k_warn(ab, "qmi failed to send mode request, mode: %d, err = %d\n", mode, ret); goto out; @@ -2643,10 +3110,10 @@ out: static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab) { struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req; - struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp; + struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp = {}; struct ce_pipe_config *ce_cfg; struct service_to_pipe *svc_cfg; - struct qmi_txn txn = {}; + struct qmi_txn txn; int ret = 0, pipe_num; ce_cfg = (struct ce_pipe_config *)ab->qmi.ce_cfg.tgt_ce; @@ -2656,8 +3123,6 @@ static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab) if (!req) return -ENOMEM; - memset(&resp, 0, sizeof(resp)); - req->host_version_valid = 1; strscpy(req->host_version, ATH12K_HOST_VERSION_STRING, sizeof(req->host_version)); @@ -2704,6 +3169,7 @@ static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab) QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req); if (ret < 0) { + qmi_txn_cancel(&txn); ath12k_warn(ab, "qmi failed to send wlan config request, err = %d\n", ret); goto out; @@ -2727,10 +3193,55 @@ out: return ret; } +static int ath12k_qmi_wlanfw_wlan_ini_send(struct ath12k_base *ab) +{ + struct qmi_wlanfw_wlan_ini_resp_msg_v01 resp = {}; + struct qmi_wlanfw_wlan_ini_req_msg_v01 req = {}; + struct qmi_txn txn; + int ret; + + req.enable_fwlog_valid = true; + req.enable_fwlog = 1; + + ret = qmi_txn_init(&ab->qmi.handle, &txn, + qmi_wlanfw_wlan_ini_resp_msg_v01_ei, &resp); + if (ret < 0) + goto out; + + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, + ATH12K_QMI_WLANFW_WLAN_INI_REQ_V01, + QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN, + qmi_wlanfw_wlan_ini_req_msg_v01_ei, &req); + if (ret < 0) { + qmi_txn_cancel(&txn); + ath12k_warn(ab, "failed to send QMI wlan ini request: %d\n", + ret); + goto out; + } + + ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS)); + if (ret < 0) { + ath12k_warn(ab, "failed to receive QMI wlan ini request: %d\n", ret); + goto out; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + ath12k_warn(ab, "QMI wlan ini response failure: %d %d\n", + resp.resp.result, resp.resp.error); + ret = -EINVAL; + goto out; + } + +out: + return ret; +} + void ath12k_qmi_firmware_stop(struct ath12k_base *ab) { int ret; + clear_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags); + ret = ath12k_qmi_wlanfw_mode_send(ab, ATH12K_FIRMWARE_MODE_OFF); if (ret < 0) { ath12k_warn(ab, "qmi failed to send wlan mode off\n"); @@ -2743,6 +3254,12 @@ int ath12k_qmi_firmware_start(struct ath12k_base *ab, { int ret; + ret = ath12k_qmi_wlanfw_wlan_ini_send(ab); + if (ret < 0) { + ath12k_warn(ab, "qmi failed to send wlan fw ini: %d\n", ret); + return ret; + } + ret = ath12k_qmi_wlanfw_wlan_cfg_send(ab); if (ret < 0) { ath12k_warn(ab, "qmi failed to send wlan cfg:%d\n", ret); @@ -2781,27 +3298,103 @@ ath12k_qmi_driver_event_post(struct ath12k_qmi *qmi, return 0; } -static int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi) +void ath12k_qmi_trigger_host_cap(struct ath12k_base *ab) { - struct ath12k_base *ab = qmi->ab; + struct ath12k_qmi *qmi = &ab->qmi; + + spin_lock(&qmi->event_lock); + + if (ath12k_qmi_get_event_block(qmi)) + 
ath12k_qmi_set_event_block(qmi, false); + + spin_unlock(&qmi->event_lock); + + ath12k_dbg(ab, ATH12K_DBG_QMI, "trigger host cap for device id %d\n", + ab->device_id); + + ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_HOST_CAP, NULL); +} + +static bool ath12k_qmi_hw_group_host_cap_ready(struct ath12k_hw_group *ag) +{ + struct ath12k_base *ab; + int i; + + for (i = 0; i < ag->num_devices; i++) { + ab = ag->ab[i]; + + if (!(ab && ab->qmi.num_radios != U8_MAX)) + return false; + } + + return true; +} + +static struct ath12k_base *ath12k_qmi_hw_group_find_blocked(struct ath12k_hw_group *ag) +{ + struct ath12k_base *ab; + int i; + + lockdep_assert_held(&ag->mutex); + + for (i = 0; i < ag->num_devices; i++) { + ab = ag->ab[i]; + if (!ab) + continue; + + spin_lock(&ab->qmi.event_lock); + + if (ath12k_qmi_get_event_block(&ab->qmi)) { + spin_unlock(&ab->qmi.event_lock); + return ab; + } + + spin_unlock(&ab->qmi.event_lock); + } + + return NULL; +} + +/* clang stack usage explodes if this is inlined */ +static noinline_for_stack +int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi) +{ + struct ath12k_base *ab = qmi->ab, *block_ab; + struct ath12k_hw_group *ag = ab->ag; int ret; + ath12k_qmi_phy_cap_send(ab); + ret = ath12k_qmi_fw_ind_register_send(ab); if (ret < 0) { ath12k_warn(ab, "qmi failed to send FW indication QMI:%d\n", ret); return ret; } - ret = ath12k_qmi_host_cap_send(ab); - if (ret < 0) { - ath12k_warn(ab, "qmi failed to send host cap QMI:%d\n", ret); - return ret; + spin_lock(&qmi->event_lock); + + ath12k_qmi_set_event_block(qmi, true); + + spin_unlock(&qmi->event_lock); + + mutex_lock(&ag->mutex); + + if (ath12k_qmi_hw_group_host_cap_ready(ag)) { + ath12k_core_hw_group_set_mlo_capable(ag); + + block_ab = ath12k_qmi_hw_group_find_blocked(ag); + if (block_ab) + ath12k_qmi_trigger_host_cap(block_ab); } + mutex_unlock(&ag->mutex); + return ret; } -static int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi) +/* clang stack usage explodes if this is inlined */ +static noinline_for_stack +int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi) { struct ath12k_base *ab = qmi->ab; int ret; @@ -2815,7 +3408,9 @@ static int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi) return ret; } -static int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi) +/* clang stack usage explodes if this is inlined */ +static noinline_for_stack +int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi) { struct ath12k_base *ab = qmi->ab; int ret; @@ -2935,6 +3530,9 @@ static const struct qmi_msg_handler ath12k_qmi_msg_handlers[] = { .decoded_size = sizeof(struct qmi_wlanfw_fw_ready_ind_msg_v01), .fn = ath12k_qmi_msg_fw_ready_cb, }, + + /* end of list */ + {}, }; static int ath12k_qmi_ops_new_server(struct qmi_handle *qmi_hdl, @@ -2977,6 +3575,21 @@ static const struct qmi_ops ath12k_qmi_ops = { .del_server = ath12k_qmi_ops_del_server, }; +static int ath12k_qmi_event_host_cap(struct ath12k_qmi *qmi) +{ + struct ath12k_base *ab = qmi->ab; + int ret; + + ret = ath12k_qmi_host_cap_send(ab); + if (ret < 0) { + ath12k_warn(ab, "failed to send qmi host cap for device id %d: %d\n", + ab->device_id, ret); + return ret; + } + + return ret; +} + static void ath12k_qmi_driver_event_work(struct work_struct *work) { struct ath12k_qmi *qmi = container_of(work, struct ath12k_qmi, @@ -3003,7 +3616,6 @@ static void ath12k_qmi_driver_event_work(struct work_struct *work) break; case ATH12K_QMI_EVENT_SERVER_EXIT: set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags); - set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags); break; case 
ATH12K_QMI_EVENT_REQUEST_MEM: ret = ath12k_qmi_event_mem_request(qmi); @@ -3017,19 +3629,28 @@ static void ath12k_qmi_driver_event_work(struct work_struct *work) break; case ATH12K_QMI_EVENT_FW_READY: clear_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags); - if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) { - ath12k_hal_dump_srng_stats(ab); + if (test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags)) { + if (ab->is_reset) + ath12k_hal_dump_srng_stats(ab); + + set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags); queue_work(ab->workqueue, &ab->restart_work); break; } clear_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags); - clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags); - ath12k_core_qmi_firmware_ready(ab); - set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags); + ret = ath12k_core_qmi_firmware_ready(ab); + if (!ret) + set_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, + &ab->dev_flags); break; + case ATH12K_QMI_EVENT_HOST_CAP: + ret = ath12k_qmi_event_host_cap(qmi); + if (ret < 0) + set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags); + break; default: ath12k_warn(ab, "invalid event type: %d", event->type); break; @@ -3082,9 +3703,19 @@ int ath12k_qmi_init_service(struct ath12k_base *ab) void ath12k_qmi_deinit_service(struct ath12k_base *ab) { + if (!ab->qmi.ab) + return; + qmi_handle_release(&ab->qmi.handle); cancel_work_sync(&ab->qmi.event_work); destroy_workqueue(ab->qmi.event_wq); ath12k_qmi_m3_free(ab); ath12k_qmi_free_target_mem_chunk(ab); + ab->qmi.ab = NULL; +} + +void ath12k_qmi_free_resource(struct ath12k_base *ab) +{ + ath12k_qmi_free_target_mem_chunk(ab); + ath12k_qmi_m3_free(ab); } @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
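A small pattern worth noting from the qmi.c teardown hunks above: ath12k_qmi_deinit_service() now bails out when ab->qmi.ab is NULL and clears it on the way out, so a repeated call becomes a harmless no-op. A hedged sketch of that idempotent-teardown shape, with the svc_sim type and names invented for illustration:

#include <stddef.h>

struct svc_sim {
	void *owner; /* back-pointer doubling as the "initialized" flag,
	              * like ab->qmi.ab in the hunk above */
};

/* Safe to call twice, or on a never-initialized object: the back-pointer
 * is checked first and cleared last.
 */
static void svc_deinit_sim(struct svc_sim *s)
{
	if (!s->owner)
		return;

	/* ... release handles, cancel work, free buffers ... */

	s->owner = NULL;
}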
*/ #ifndef ATH12K_QMI_H @@ -15,7 +15,6 @@ #define ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE 64 #define ATH12K_QMI_CALDB_ADDRESS 0x4BA00000 #define ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 128 -#define ATH12K_QMI_WLFW_NODE_ID_BASE 0x07 #define ATH12K_QMI_WLFW_SERVICE_ID_V01 0x45 #define ATH12K_QMI_WLFW_SERVICE_VERS_V01 0x01 #define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01 0x02 @@ -69,6 +68,7 @@ enum ath12k_qmi_event_type { ATH12K_QMI_EVENT_FORCE_FW_ASSERT, ATH12K_QMI_EVENT_POWER_UP, ATH12K_QMI_EVENT_POWER_DOWN, + ATH12K_QMI_EVENT_HOST_CAP, ATH12K_QMI_EVENT_MAX, }; @@ -97,6 +97,8 @@ struct ath12k_qmi_event_msg { struct target_mem_chunk { u32 size; u32 type; + u32 prev_size; + u32 prev_type; dma_addr_t paddr; union { void __iomem *ioaddr; @@ -141,6 +143,11 @@ struct ath12k_qmi { u32 target_mem_mode; bool target_mem_delayed; u8 cal_done; + + /* protected with struct ath12k_qmi::event_lock */ + bool block_event; + + u8 num_radios; struct target_info target; struct m3_mem_region m3_mem; unsigned int service_ins_id; @@ -165,6 +172,7 @@ enum ath12k_qmi_target_mem { BDF_MEM_REGION_TYPE = 0x2, M3_DUMP_REGION_TYPE = 0x3, CALDB_MEM_REGION_TYPE = 0x4, + MLO_GLOBAL_MEM_REGION_TYPE = 0x8, PAGEABLE_MEM_REGION_TYPE = 0x9, }; @@ -251,6 +259,24 @@ struct qmi_wlanfw_host_cap_resp_msg_v01 { struct qmi_response_type_v01 resp; }; +#define QMI_WLANFW_PHY_CAP_REQ_MSG_V01_MAX_LEN 0 +#define QMI_WLANFW_PHY_CAP_REQ_V01 0x0057 +#define QMI_WLANFW_PHY_CAP_RESP_MSG_V01_MAX_LEN 18 +#define QMI_WLANFW_PHY_CAP_RESP_V01 0x0057 + +struct qmi_wlanfw_phy_cap_req_msg_v01 { +}; + +struct qmi_wlanfw_phy_cap_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u8 num_phy_valid; + u8 num_phy; + u8 board_id_valid; + u32 board_id; + u8 single_chip_mlo_support_valid; + u8 single_chip_mlo_support; +}; + #define QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN 54 #define QMI_WLANFW_IND_REGISTER_REQ_V01 0x0020 #define QMI_WLANFW_IND_REGISTER_RESP_MSG_V01_MAX_LEN 18 @@ -559,12 +585,41 @@ struct qmi_wlanfw_wlan_cfg_resp_msg_v01 { struct qmi_response_type_v01 resp; }; +#define ATH12K_QMI_WLANFW_WLAN_INI_REQ_V01 0x002F +#define ATH12K_QMI_WLANFW_WLAN_INI_RESP_V01 0x002F +#define QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN 7 +#define QMI_WLANFW_WLAN_INI_RESP_MSG_V01_MAX_LEN 7 + +struct qmi_wlanfw_wlan_ini_req_msg_v01 { + /* Must be set to true if enable_fwlog is being passed */ + u8 enable_fwlog_valid; + u8 enable_fwlog; +}; + +struct qmi_wlanfw_wlan_ini_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +static inline void ath12k_qmi_set_event_block(struct ath12k_qmi *qmi, bool block) +{ + lockdep_assert_held(&qmi->event_lock); + + qmi->block_event = block; +} + +static inline bool ath12k_qmi_get_event_block(struct ath12k_qmi *qmi) +{ + lockdep_assert_held(&qmi->event_lock); + + return qmi->block_event; +} + int ath12k_qmi_firmware_start(struct ath12k_base *ab, u32 mode); void ath12k_qmi_firmware_stop(struct ath12k_base *ab); -void ath12k_qmi_event_work(struct work_struct *work); -void ath12k_qmi_msg_recv_work(struct work_struct *work); void ath12k_qmi_deinit_service(struct ath12k_base *ab); int ath12k_qmi_init_service(struct ath12k_base *ab); +void ath12k_qmi_free_resource(struct ath12k_base *ab); +void ath12k_qmi_trigger_host_cap(struct ath12k_base *ab); #endif @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
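The ath12k_qmi_set_event_block()/ath12k_qmi_get_event_block() helpers added to qmi.h above rely on lockdep_assert_held() to enforce that every caller holds event_lock. A rough userspace analog of the same accessor discipline, using pthreads with an assert in place of lockdep; all *_sim names and the explicit "locked" flag are invented for this sketch:

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

struct qmi_sim {
	pthread_mutex_t event_lock;
	bool locked;      /* stand-in for lockdep's held-lock tracking */
	bool block_event; /* only touched with event_lock held */
};

static void set_event_block_sim(struct qmi_sim *q, bool block)
{
	assert(q->locked); /* plays the role of lockdep_assert_held() */
	q->block_event = block;
}

static bool get_event_block_sim(struct qmi_sim *q)
{
	assert(q->locked);
	return q->block_event;
}

/* Caller takes the lock around the accessors, matching the pattern in
 * ath12k_qmi_trigger_host_cap() above.
 */
static void trigger_host_cap_sim(struct qmi_sim *q)
{
	pthread_mutex_lock(&q->event_lock);
	q->locked = true;

	if (get_event_block_sim(q))
		set_event_block_sim(q, false);

	q->locked = false;
	pthread_mutex_unlock(&q->event_lock);
}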
*/ #include <linux/rtnetlink.h> #include "core.h" @@ -28,11 +28,11 @@ static const struct ieee80211_regdomain ath12k_world_regd = { } }; -static bool ath12k_regdom_changes(struct ath12k *ar, char *alpha2) +static bool ath12k_regdom_changes(struct ieee80211_hw *hw, char *alpha2) { const struct ieee80211_regdomain *regd; - regd = rcu_dereference_rtnl(ar->hw->wiphy->regd); + regd = rcu_dereference_rtnl(hw->wiphy->regd); /* This can happen during wiphy registration where the previous * user request is received before we update the regd received * from firmware. @@ -48,8 +48,9 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct ath12k_wmi_init_country_arg arg; - struct ath12k *ar = hw->priv; - int ret; + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k *ar = ath12k_ah_to_ar(ah, 0); + int ret, i; ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Regulatory Notification received for %s\n", wiphy_name(wiphy)); @@ -71,7 +72,7 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) return; } - if (!ath12k_regdom_changes(ar, request->alpha2)) { + if (!ath12k_regdom_changes(hw, request->alpha2)) { ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Country is already set\n"); return; } @@ -84,10 +85,16 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) memcpy(&arg.cc_info.alpha2, request->alpha2, 2); arg.cc_info.alpha2[2] = 0; - ret = ath12k_wmi_send_init_country_cmd(ar, &arg); - if (ret) - ath12k_warn(ar->ab, - "INIT Country code set to fw failed : %d\n", ret); + /* Allow fresh updates to wiphy regd */ + ah->regd_updated = false; + + /* Send the reg change request to all the radios */ + for_each_ar(ah, ar, i) { + ret = ath12k_wmi_send_init_country_cmd(ar, &arg); + if (ret) + ath12k_warn(ar->ab, + "INIT Country code set to fw failed : %d\n", ret); + } } int ath12k_reg_update_chan_list(struct ath12k *ar) @@ -95,7 +102,7 @@ int ath12k_reg_update_chan_list(struct ath12k *ar) struct ieee80211_supported_band **bands; struct ath12k_wmi_scan_chan_list_arg *arg; struct ieee80211_channel *channel; - struct ieee80211_hw *hw = ar->hw; + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); struct ath12k_wmi_channel_arg *ch; enum nl80211_band band; int num_channels = 0; @@ -103,7 +110,7 @@ int ath12k_reg_update_chan_list(struct ath12k *ar) bands = hw->wiphy->bands; for (band = 0; band < NUM_NL80211_BANDS; band++) { - if (!bands[band]) + if (!(ar->mac.sbands[band].channels && bands[band])) continue; for (i = 0; i < bands[band]->n_channels; i++) { @@ -129,7 +136,7 @@ int ath12k_reg_update_chan_list(struct ath12k *ar) ch = arg->channel; for (band = 0; band < NUM_NL80211_BANDS; band++) { - if (!bands[band]) + if (!(ar->mac.sbands[band].channels && bands[band])) continue; for (i = 0; i < bands[band]->n_channels; i++) { @@ -199,11 +206,34 @@ static void ath12k_copy_regd(struct ieee80211_regdomain *regd_orig, int ath12k_regd_update(struct ath12k *ar, bool init) { + struct ath12k_hw *ah = ath12k_ar_to_ah(ar); + struct ieee80211_hw *hw = ah->hw; struct ieee80211_regdomain *regd, *regd_copy = NULL; int ret, regd_len, pdev_id; struct ath12k_base *ab; + int i; ab = ar->ab; + + /* If one of the radios within ah has already updated the regd for + * the wiphy, then avoid setting regd again + */ + if (ah->regd_updated) + return 0; + + /* firmware provides reg rules which are similar for 2 GHz and 5 GHz + * pdev but 6 GHz pdev has superset of all rules including rules for + * all bands, we prefer 6 GHz pdev's 
rules to be used for setup of + * the wiphy regd. + * If 6 GHz pdev was part of the ath12k_hw, wait for the 6 GHz pdev, + * else pick the first pdev which calls this function and use its + * regd to update global hw regd. + * The regd_updated flag set at the end will not allow any further + * updates. + */ + if (ah->use_6ghz_regd && !ar->supports_6ghz) + return 0; + pdev_id = ar->pdev_idx; spin_lock_bh(&ab->base_lock); @@ -246,9 +276,9 @@ int ath12k_regd_update(struct ath12k *ar, bool init) } rtnl_lock(); - wiphy_lock(ar->hw->wiphy); - ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy); - wiphy_unlock(ar->hw->wiphy); + wiphy_lock(hw->wiphy); + ret = regulatory_set_wiphy_regd_sync(hw->wiphy, regd_copy); + wiphy_unlock(hw->wiphy); rtnl_unlock(); kfree(regd_copy); @@ -256,12 +286,20 @@ int ath12k_regd_update(struct ath12k *ar, bool init) if (ret) goto err; - if (ar->state == ATH12K_STATE_ON) { + if (ah->state != ATH12K_HW_STATE_ON) + goto skip; + + ah->regd_updated = true; + /* Apply the new regd to all the radios, this is expected to be received only once + * since we check for ah->regd_updated and allow here only once. + */ + for_each_ar(ah, ar, i) { + ab = ar->ab; ret = ath12k_reg_update_chan_list(ar); if (ret) goto err; } - +skip: return 0; err: ath12k_warn(ab, "failed to perform regd update : %d\n", ret); @@ -314,6 +352,19 @@ static u32 ath12k_map_fw_reg_flags(u16 reg_flags) return flags; } +static u32 ath12k_map_fw_phy_flags(u32 phy_flags) +{ + u32 flags = 0; + + if (phy_flags & ATH12K_REG_PHY_BITMAP_NO11AX) + flags |= NL80211_RRF_NO_HE; + + if (phy_flags & ATH12K_REG_PHY_BITMAP_NO11BE) + flags |= NL80211_RRF_NO_EHT; + + return flags; +} + static bool ath12k_reg_can_intersect(struct ieee80211_reg_rule *rule1, struct ieee80211_reg_rule *rule2) @@ -638,6 +689,7 @@ ath12k_reg_build_regd(struct ath12k_base *ab, } flags |= ath12k_map_fw_reg_flags(reg_rule->flags); + flags |= ath12k_map_fw_phy_flags(reg_info->phybitmap); ath12k_reg_update_rule(tmp_regd->reg_rules + i, reg_rule->start_freq, @@ -715,10 +767,10 @@ void ath12k_regd_update_work(struct work_struct *work) } } -void ath12k_reg_init(struct ath12k *ar) +void ath12k_reg_init(struct ieee80211_hw *hw) { - ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED; - ar->hw->wiphy->reg_notifier = ath12k_reg_notifier; + hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED; + hw->wiphy->reg_notifier = ath12k_reg_notifier; } void ath12k_reg_free(struct ath12k_base *ab) @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH12K_REG_H @@ -83,7 +83,13 @@ struct ath12k_reg_info { [WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE]; }; -void ath12k_reg_init(struct ath12k *ar); +/* Phy bitmaps */ +enum ath12k_reg_phy_bitmap { + ATH12K_REG_PHY_BITMAP_NO11AX = BIT(5), + ATH12K_REG_PHY_BITMAP_NO11BE = BIT(6), +}; + +void ath12k_reg_init(struct ieee80211_hw *hw); void ath12k_reg_free(struct ath12k_base *ab); void ath12k_regd_update_work(struct work_struct *work); struct ieee80211_regdomain *ath12k_reg_build_regd(struct ath12k_base *ab, diff --git a/rx_desc.h b/rx_desc.h index f99556a253e5..10366bbe9999 100644 --- a/rx_desc.h +++ b/rx_desc.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. 
All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH12K_RX_DESC_H #define ATH12K_RX_DESC_H @@ -147,6 +147,61 @@ struct rx_mpdu_start_qcn9274 { __le32 res1; } __packed; +#define QCN9274_MPDU_START_SELECT_MPDU_START_TAG BIT(0) +#define QCN9274_MPDU_START_SELECT_INFO0_REO_QUEUE_DESC_LO BIT(1) +#define QCN9274_MPDU_START_SELECT_INFO1_PN_31_0 BIT(2) +#define QCN9274_MPDU_START_SELECT_PN_95_32 BIT(3) +#define QCN9274_MPDU_START_SELECT_PN_127_96_INFO2 BIT(4) +#define QCN9274_MPDU_START_SELECT_PEER_MDATA_INFO3_PHY_PPDU_ID BIT(5) +#define QCN9274_MPDU_START_SELECT_AST_IDX_SW_PEER_ID_INFO4 BIT(6) +#define QCN9274_MPDU_START_SELECT_INFO5_INFO6 BIT(7) +#define QCN9274_MPDU_START_SELECT_FRAME_CTRL_DURATION_ADDR1_31_0 BIT(8) +#define QCN9274_MPDU_START_SELECT_ADDR2_47_0_ADDR1_47_32 BIT(9) +#define QCN9274_MPDU_START_SELECT_ADDR3_47_0_SEQ_CTRL BIT(10) +#define QCN9274_MPDU_START_SELECT_ADDR4_47_0_QOS_CTRL BIT(11) +#define QCN9274_MPDU_START_SELECT_HT_CTRL_INFO7 BIT(12) +#define QCN9274_MPDU_START_SELECT_ML_ADDR1_47_0_ML_ADDR2_15_0 BIT(13) +#define QCN9274_MPDU_START_SELECT_ML_ADDR2_47_16_INFO8 BIT(14) +#define QCN9274_MPDU_START_SELECT_RES_0_RES_1 BIT(15) + +#define QCN9274_MPDU_START_WMASK (QCN9274_MPDU_START_SELECT_INFO1_PN_31_0 | \ + QCN9274_MPDU_START_SELECT_PN_95_32 | \ + QCN9274_MPDU_START_SELECT_PN_127_96_INFO2 | \ + QCN9274_MPDU_START_SELECT_PEER_MDATA_INFO3_PHY_PPDU_ID | \ + QCN9274_MPDU_START_SELECT_AST_IDX_SW_PEER_ID_INFO4 | \ + QCN9274_MPDU_START_SELECT_INFO5_INFO6 | \ + QCN9274_MPDU_START_SELECT_FRAME_CTRL_DURATION_ADDR1_31_0 | \ + QCN9274_MPDU_START_SELECT_ADDR2_47_0_ADDR1_47_32 | \ + QCN9274_MPDU_START_SELECT_ADDR3_47_0_SEQ_CTRL | \ + QCN9274_MPDU_START_SELECT_ADDR4_47_0_QOS_CTRL) + +/* The below rx_mpdu_start_qcn9274_compact structure is tied with the mask + * value QCN9274_MPDU_START_WMASK. If the mask value changes the structure + * will also change. + */ + +struct rx_mpdu_start_qcn9274_compact { + __le32 info1; + __le32 pn[4]; + __le32 info2; + __le32 peer_meta_data; + __le16 info3; + __le16 phy_ppdu_id; + __le16 ast_index; + __le16 sw_peer_id; + __le32 info4; + __le32 info5; + __le32 info6; + __le16 frame_ctrl; + __le16 duration; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; + __le16 seq_ctrl; + u8 addr4[ETH_ALEN]; + __le16 qos_ctrl; +} __packed; + /* rx_mpdu_start * * reo_destination_indication @@ -221,7 +276,7 @@ struct rx_mpdu_start_qcn9274 { * PPE routing even if RXOLE CCE or flow search indicate 'Use_PPE' * This is set by SW for peers which are being handled by a * host SW/accelerator subsystem that also handles packet - * uffer management for WiFi-to-PPE routing. + * buffer management for WiFi-to-PPE routing. 
* * This is cleared by SW for peers which are being handled * by a different subsystem, completely disabling WiFi-to-PPE @@ -608,6 +663,8 @@ enum rx_msdu_start_reception_type { RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA_MIMO, }; +#define RX_MSDU_END_64_TLV_SRC_LINK_ID GENMASK(24, 22) + #define RX_MSDU_END_INFO0_RXPCU_MPDU_FITLER GENMASK(1, 0) #define RX_MSDU_END_INFO0_SW_FRAME_GRP_ID GENMASK(8, 2) @@ -650,6 +707,7 @@ enum rx_msdu_start_reception_type { #define RX_MSDU_END_INFO7_AGGR_COUNT GENMASK(7, 0) #define RX_MSDU_END_INFO7_FLOW_AGGR_CONTN BIT(8) #define RX_MSDU_END_INFO7_FISA_TIMEOUT BIT(9) + #define RX_MSDU_END_INFO7_TCPUDP_CSUM_FAIL_CPY BIT(10) #define RX_MSDU_END_INFO7_MSDU_LIMIT_ERROR BIT(11) #define RX_MSDU_END_INFO7_FLOW_IDX_TIMEOUT BIT(12) @@ -698,6 +756,7 @@ enum rx_msdu_start_reception_type { #define RX_MSDU_END_INFO12_RATE_MCS GENMASK(17, 14) #define RX_MSDU_END_INFO12_RECV_BW GENMASK(20, 18) #define RX_MSDU_END_INFO12_RECEPTION_TYPE GENMASK(23, 21) + #define RX_MSDU_END_INFO12_MIMO_SS_BITMAP GENMASK(30, 24) #define RX_MSDU_END_INFO12_MIMO_DONE_COPY BIT(31) @@ -714,7 +773,6 @@ enum rx_msdu_start_reception_type { #define RX_MSDU_END_INFO13_EOSP BIT(11) #define RX_MSDU_END_INFO13_A_MSDU_ERROR BIT(12) #define RX_MSDU_END_INFO13_ORDER BIT(14) -#define RX_MSDU_END_INFO13_WIFI_PARSER_ERR BIT(15) #define RX_MSDU_END_INFO13_OVERFLOW_ERR BIT(16) #define RX_MSDU_END_INFO13_MSDU_LEN_ERR BIT(17) #define RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL BIT(18) @@ -732,6 +790,8 @@ enum rx_msdu_start_reception_type { #define RX_MSDU_END_INFO13_UNDECRYPT_FRAME_ERR BIT(30) #define RX_MSDU_END_INFO13_FCS_ERR BIT(31) +#define RX_MSDU_END_INFO13_WIFI_PARSER_ERR BIT(15) + #define RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE GENMASK(12, 10) #define RX_MSDU_END_INFO14_RX_BITMAP_NOT_UPDED BIT(13) #define RX_MSDU_END_INFO14_MSDU_DONE BIT(31) @@ -782,6 +842,52 @@ struct rx_msdu_end_qcn9274 { __le32 info14; } __packed; +#define QCN9274_MSDU_END_SELECT_MSDU_END_TAG BIT(0) +#define QCN9274_MSDU_END_SELECT_INFO0_PHY_PPDUID_IP_HDR_CSUM_INFO1 BIT(1) +#define QCN9274_MSDU_END_SELECT_INFO2_CUMULATIVE_CSUM_RULE_IND_0 BIT(2) +#define QCN9274_MSDU_END_SELECT_IPV6_OP_CRC_INFO3_TYPE13 BIT(3) +#define QCN9274_MSDU_END_SELECT_RULE_IND_1_TCP_SEQ_NUM BIT(4) +#define QCN9274_MSDU_END_SELECT_TCP_ACK_NUM_INFO4_WINDOW_SIZE BIT(5) +#define QCN9274_MSDU_END_SELECT_SA_SW_PER_ID_INFO5_SA_DA_ID BIT(6) +#define QCN9274_MSDU_END_SELECT_INFO6_FSE_METADATA BIT(7) +#define QCN9274_MSDU_END_SELECT_CCE_MDATA_TCP_UDP_CSUM_INFO7_IP_LEN BIT(8) +#define QCN9274_MSDU_END_SELECT_INFO8_INFO9 BIT(9) +#define QCN9274_MSDU_END_SELECT_INFO10_INFO11 BIT(10) +#define QCN9274_MSDU_END_SELECT_VLAN_CTAG_STAG_CI_PEER_MDATA BIT(11) +#define QCN9274_MSDU_END_SELECT_INFO12_AND_FLOW_ID_TOEPLITZ BIT(12) +#define QCN9274_MSDU_END_SELECT_PPDU_START_TS_63_32_PHY_MDATA BIT(13) +#define QCN9274_MSDU_END_SELECT_PPDU_START_TS_31_0_TOEPLITZ_HASH_2_4 BIT(14) +#define QCN9274_MSDU_END_SELECT_RES0_SA_47_0 BIT(15) +#define QCN9274_MSDU_END_SELECT_INFO13_INFO14 BIT(16) + +#define QCN9274_MSDU_END_WMASK (QCN9274_MSDU_END_SELECT_MSDU_END_TAG | \ + QCN9274_MSDU_END_SELECT_SA_SW_PER_ID_INFO5_SA_DA_ID | \ + QCN9274_MSDU_END_SELECT_INFO10_INFO11 | \ + QCN9274_MSDU_END_SELECT_INFO12_AND_FLOW_ID_TOEPLITZ | \ + QCN9274_MSDU_END_SELECT_PPDU_START_TS_63_32_PHY_MDATA | \ + QCN9274_MSDU_END_SELECT_INFO13_INFO14) + +/* The below rx_msdu_end_qcn9274_compact structure is tied with the mask value + * QCN9274_MSDU_END_WMASK. If the mask value changes the structure will also + * change. 
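The word-mask scheme the comment above describes can be modeled compactly: the full descriptor is a sequence of 32-bit-word groups, each select bit in the wmask subscribes one group, and hardware writes only the subscribed groups back to back, which is why the *_compact structs must track the mask bit-for-bit. A conceptual sketch, with made-up group sizes and the compact_copy_sim name invented for illustration:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Copy only the word groups subscribed by wmask from the full layout
 * into a packed output, returning the packed length in 32-bit words.
 * grp_words[g] is the size of group g in words.
 */
static size_t compact_copy_sim(const uint32_t *full, uint32_t wmask,
			       const uint8_t *grp_words, int ngroups,
			       uint32_t *out)
{
	size_t src = 0, dst = 0;
	int g;

	for (g = 0; g < ngroups; g++) {
		if (wmask & (1u << g)) {
			memcpy(&out[dst], &full[src],
			       grp_words[g] * sizeof(uint32_t));
			dst += grp_words[g];
		}
		src += grp_words[g];
	}

	return dst;
}

With, say, grp_words = {2, 4, 1} and bits 0 and 2 set in wmask, the packed output holds three words, group 0 followed immediately by group 2, exactly the way rx_msdu_end_qcn9274_compact packs the fields selected by QCN9274_MSDU_END_WMASK.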
+ */ + +struct rx_msdu_end_qcn9274_compact { + __le64 msdu_end_tag; + __le16 sa_sw_peer_id; + __le16 info5; + __le16 sa_idx; + __le16 da_idx_or_sw_peer_id; + __le32 info10; + __le32 info11; + __le32 info12; + __le32 flow_id_toeplitz; + __le32 ppdu_start_timestamp_63_32; + __le32 phy_meta_data; + __le32 info13; + __le32 info14; +} __packed; + /* rx_msdu_end * * rxpcu_mpdu_filter_in_category @@ -1387,16 +1493,18 @@ struct rx_msdu_end_qcn9274 { * */ -/* TODO: Move to compact TLV approach - * By default these tlv's are not aligned to 128b boundary - * Need to remove unused qwords and make them compact/aligned - */ struct hal_rx_desc_qcn9274 { struct rx_msdu_end_qcn9274 msdu_end; struct rx_mpdu_start_qcn9274 mpdu_start; u8 msdu_payload[]; } __packed; +struct hal_rx_desc_qcn9274_compact { + struct rx_msdu_end_qcn9274_compact msdu_end; + struct rx_mpdu_start_qcn9274_compact mpdu_start; + u8 msdu_payload[]; +} __packed; + #define RX_BE_PADDING0_BYTES 8 #define RX_BE_PADDING1_BYTES 8 @@ -1421,6 +1529,7 @@ struct hal_rx_desc_wcn7850 { struct hal_rx_desc { union { struct hal_rx_desc_qcn9274 qcn9274; + struct hal_rx_desc_qcn9274_compact qcn9274_compact; struct hal_rx_desc_wcn7850 wcn7850; } u; } __packed; @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) @@ -36,8 +36,8 @@ TRACE_EVENT(ath12k_htt_pktlog, ), TP_fast_assign( - __assign_str(device, dev_name(ar->ab->dev)); - __assign_str(driver, dev_driver_string(ar->ab->dev)); + __assign_str(device); + __assign_str(driver); __entry->buf_len = buf_len; __entry->pktlog_checksum = pktlog_checksum; memcpy(__get_dynamic_array(pktlog), buf, buf_len); @@ -73,8 +73,8 @@ TRACE_EVENT(ath12k_htt_ppdu_stats, ), TP_fast_assign( - __assign_str(device, dev_name(ar->ab->dev)); - __assign_str(driver, dev_driver_string(ar->ab->dev)); + __assign_str(device); + __assign_str(driver); __entry->len = len; __entry->info = ar->pdev->timestamp.info; __entry->sync_tstmp_lo_us = ar->pdev->timestamp.sync_timestamp_hi_us; @@ -117,8 +117,8 @@ TRACE_EVENT(ath12k_htt_rxdesc, ), TP_fast_assign( - __assign_str(device, dev_name(ar->ab->dev)); - __assign_str(driver, dev_driver_string(ar->ab->dev)); + __assign_str(device); + __assign_str(driver); __entry->len = len; __entry->type = type; __entry->info = ar->pdev->timestamp.info; @@ -140,6 +140,33 @@ TRACE_EVENT(ath12k_htt_rxdesc, ) ); +TRACE_EVENT(ath12k_wmi_diag, + TP_PROTO(struct ath12k_base *ab, const void *data, size_t len), + + TP_ARGS(ab, data, len), + + TP_STRUCT__entry( + __string(device, dev_name(ab->dev)) + __string(driver, dev_driver_string(ab->dev)) + __field(u16, len) + __dynamic_array(u8, data, len) + ), + + TP_fast_assign( + __assign_str(device); + __assign_str(driver); + __entry->len = len; + memcpy(__get_dynamic_array(data), data, len); + ), + + TP_printk( + "%s %s tlv diag len %d", + __get_str(driver), + __get_str(device), + __entry->len + ) +); + #endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/ /* we don't want to use include/trace/events */ @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <linux/skbuff.h> #include <linux/ctype.h> @@ -19,6 +19,7 @@ #include "mac.h" #include "hw.h" #include "peer.h" +#include "p2p.h" struct ath12k_wmi_svc_ready_parse { bool wmi_svc_bitmap_done; @@ -152,6 +153,8 @@ static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = { .min_len = sizeof(struct wmi_service_available_event) }, [WMI_TAG_PEER_ASSOC_CONF_EVENT] = { .min_len = sizeof(struct wmi_peer_assoc_conf_event) }, + [WMI_TAG_RFKILL_EVENT] = { + .min_len = sizeof(struct wmi_rfkill_state_change_event) }, [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) }, [WMI_TAG_HOST_SWFDA_EVENT] = { @@ -160,6 +163,14 @@ static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = { .min_len = sizeof(struct wmi_probe_resp_tx_status_event) }, [WMI_TAG_VDEV_DELETE_RESP_EVENT] = { .min_len = sizeof(struct wmi_vdev_delete_resp_event) }, + [WMI_TAG_TWT_ENABLE_COMPLETE_EVENT] = { + .min_len = sizeof(struct wmi_twt_enable_event) }, + [WMI_TAG_TWT_DISABLE_COMPLETE_EVENT] = { + .min_len = sizeof(struct wmi_twt_disable_event) }, + [WMI_TAG_P2P_NOA_INFO] = { + .min_len = sizeof(struct ath12k_wmi_p2p_noa_info) }, + [WMI_TAG_P2P_NOA_EVENT] = { + .min_len = sizeof(struct wmi_p2p_noa_event) }, }; static __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len) @@ -177,18 +188,9 @@ void ath12k_wmi_init_qcn9274(struct ath12k_base *ab, struct ath12k_wmi_resource_config_arg *config) { config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS; - - if (ab->num_radios == 2) { - config->num_peers = TARGET_NUM_PEERS(DBS); - config->num_tids = TARGET_NUM_TIDS(DBS); - } else if (ab->num_radios == 3) { - config->num_peers = TARGET_NUM_PEERS(DBS_SBS); - config->num_tids = TARGET_NUM_TIDS(DBS_SBS); - } else { - /* Control should not reach here */ - config->num_peers = TARGET_NUM_PEERS(SINGLE); - config->num_tids = TARGET_NUM_TIDS(SINGLE); - } + config->num_peers = ab->num_radios * + ath12k_core_get_max_peers_per_radio(ab); + config->num_tids = ath12k_core_get_max_num_tids(ab); config->num_offload_peers = TARGET_NUM_OFFLD_PEERS; config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS; config->num_peer_keys = TARGET_NUM_PEER_KEYS; @@ -226,6 +228,12 @@ void ath12k_wmi_init_qcn9274(struct ath12k_base *ab, config->peer_map_unmap_version = 0x32; config->twt_ap_pdev_count = ab->num_radios; config->twt_ap_sta_count = 1000; + config->ema_max_vap_cnt = ab->num_radios; + config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD; + config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt; + + if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map)) + config->peer_metadata_ver = ATH12K_PEER_METADATA_V1B; } void ath12k_wmi_init_wcn7850(struct ath12k_base *ab, @@ -357,8 +365,8 @@ static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb, } static const void ** -ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab, const void *ptr, - size_t len, gfp_t gfp) +ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab, + struct sk_buff *skb, gfp_t gfp) { const void **tb; int ret; @@ -367,7 +375,7 @@ ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab, const void *ptr, if (!tb) return ERR_PTR(-ENOMEM); - ret = ath12k_wmi_tlv_parse(ab, tb, ptr, len); + ret = ath12k_wmi_tlv_parse(ab, tb, skb->data, skb->len); if (ret) { kfree(tb); return ERR_PTR(ret); @@ -406,22 +414,22 @@ err_pull: int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb, u32 
cmd_id) { - struct ath12k_wmi_base *wmi_sc = wmi->wmi_ab; + struct ath12k_wmi_base *wmi_ab = wmi->wmi_ab; int ret = -EOPNOTSUPP; might_sleep(); - wait_event_timeout(wmi_sc->tx_credits_wq, ({ + wait_event_timeout(wmi_ab->tx_credits_wq, ({ ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id); - if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_sc->ab->dev_flags)) + if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_ab->ab->dev_flags)) ret = -ESHUTDOWN; (ret != -EAGAIN); }), WMI_SEND_TIMEOUT_HZ); if (ret == -EAGAIN) - ath12k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id); + ath12k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id); return ret; } @@ -491,13 +499,14 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle, mac_caps = wmi_mac_phy_caps + phy_idx; - pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id); + pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps); + pdev->hw_link_id = ath12k_wmi_mac_phy_get_hw_link_id(mac_caps); pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands); pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density); fw_pdev = &ab->fw_pdev[ab->fw_pdev_count]; fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands); - fw_pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id); + fw_pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps); fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id); ab->fw_pdev_count++; @@ -725,10 +734,24 @@ static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *sk return 0; } -struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_sc, u32 len) +static u32 ath12k_wmi_mgmt_get_freq(struct ath12k *ar, + struct ieee80211_tx_info *info) +{ + struct ath12k_base *ab = ar->ab; + u32 freq = 0; + + if (ab->hw_params->single_pdev_only && + ar->scan.is_roc && + (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) + freq = ar->scan.roc_freq; + + return freq; +} + +struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len) { struct sk_buff *skb; - struct ath12k_base *ab = wmi_sc->ab; + struct ath12k_base *ab = wmi_ab->ab; u32 round_len = roundup(len, 4); skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len); @@ -750,6 +773,7 @@ int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id, { struct ath12k_wmi_pdev *wmi = ar->wmi; struct wmi_mgmt_send_cmd *cmd; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame); struct wmi_tlv *frame_tlv; struct sk_buff *skb; u32 buf_len; @@ -768,7 +792,7 @@ int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id, sizeof(*cmd)); cmd->vdev_id = cpu_to_le32(vdev_id); cmd->desc_id = cpu_to_le32(buf_id); - cmd->chanfreq = 0; + cmd->chanfreq = cpu_to_le32(ath12k_wmi_mgmt_get_freq(ar, info)); cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr)); cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr)); cmd->frame_len = cpu_to_le32(frame->len); @@ -797,6 +821,8 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr, struct wmi_vdev_create_cmd *cmd; struct sk_buff *skb; struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams; + bool is_ml_vdev = is_valid_ether_addr(args->mld_addr); + struct wmi_vdev_create_mlo_params *ml_params; struct wmi_tlv *tlv; int ret, len; void *ptr; @@ -806,7 +832,8 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr, * both the bands. */ len = sizeof(*cmd) + TLV_HDR_SIZE + - (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)); + (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)) + + (is_ml_vdev ? 
TLV_HDR_SIZE + sizeof(*ml_params) : 0); skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) @@ -821,9 +848,14 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr, cmd->vdev_subtype = cpu_to_le32(args->subtype); cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX); cmd->pdev_id = cpu_to_le32(args->pdev_id); + cmd->mbssid_flags = cpu_to_le32(args->mbssid_flags); + cmd->mbssid_tx_vdev_id = cpu_to_le32(args->mbssid_tx_vdev_id); cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id); ether_addr_copy(cmd->vdev_macaddr.addr, macaddr); + if (args->if_stats_id != ATH12K_INVAL_VDEV_STATS_ID) + cmd->vdev_stats_id_valid = cpu_to_le32(BIT(0)); + ptr = skb->data + sizeof(*cmd); len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams); @@ -835,20 +867,35 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr, len = sizeof(*txrx_streams); txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS, len); - txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G; + txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_2G); txrx_streams->supported_tx_streams = - args->chains[NL80211_BAND_2GHZ].tx; + cpu_to_le32(args->chains[NL80211_BAND_2GHZ].tx); txrx_streams->supported_rx_streams = - args->chains[NL80211_BAND_2GHZ].rx; + cpu_to_le32(args->chains[NL80211_BAND_2GHZ].rx); txrx_streams++; txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS, len); - txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G; + txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_5G); txrx_streams->supported_tx_streams = - args->chains[NL80211_BAND_5GHZ].tx; + cpu_to_le32(args->chains[NL80211_BAND_5GHZ].tx); txrx_streams->supported_rx_streams = - args->chains[NL80211_BAND_5GHZ].rx; + cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx); + + ptr += WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams); + + if (is_ml_vdev) { + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, + sizeof(*ml_params)); + ptr += TLV_HDR_SIZE; + ml_params = ptr; + + ml_params->tlv_header = + ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_CREATE_PARAMS, + sizeof(*ml_params)); + ether_addr_copy(ml_params->mld_macaddr.addr, args->mld_addr); + } ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n", @@ -991,19 +1038,27 @@ static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan, int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg, bool restart) { + struct wmi_vdev_start_mlo_params *ml_params; + struct wmi_partner_link_info *partner_info; struct ath12k_wmi_pdev *wmi = ar->wmi; struct wmi_vdev_start_request_cmd *cmd; struct sk_buff *skb; struct ath12k_wmi_channel_params *chan; struct wmi_tlv *tlv; void *ptr; - int ret, len; + int ret, len, i, ml_arg_size = 0; if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid))) return -EINVAL; len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE; + if (!restart && arg->ml.enabled) { + ml_arg_size = TLV_HDR_SIZE + sizeof(*ml_params) + + TLV_HDR_SIZE + (arg->ml.num_partner_links * + sizeof(*partner_info)); + len += ml_arg_size; + } skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; @@ -1022,6 +1077,8 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg, cmd->regdomain = cpu_to_le32(arg->regdomain); cmd->he_ops = cpu_to_le32(arg->he_ops); cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap); + cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags); + cmd->mbssid_tx_vdev_id = 
cpu_to_le32(arg->mbssid_tx_vdev_id); if (!restart) { if (arg->ssid) { @@ -1049,11 +1106,66 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg, tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0); /* Note: This is a nested TLV containing: - * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv].. + * [wmi_tlv][ath12k_wmi_p2p_noa_descriptor][wmi_tlv].. */ ptr += sizeof(*tlv); + if (ml_arg_size) { + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, + sizeof(*ml_params)); + ptr += TLV_HDR_SIZE; + + ml_params = ptr; + + ml_params->tlv_header = + ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_START_PARAMS, + sizeof(*ml_params)); + + ml_params->flags = le32_encode_bits(arg->ml.enabled, + ATH12K_WMI_FLAG_MLO_ENABLED) | + le32_encode_bits(arg->ml.assoc_link, + ATH12K_WMI_FLAG_MLO_ASSOC_LINK) | + le32_encode_bits(arg->ml.mcast_link, + ATH12K_WMI_FLAG_MLO_MCAST_VDEV) | + le32_encode_bits(arg->ml.link_add, + ATH12K_WMI_FLAG_MLO_LINK_ADD); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %d start ml flags 0x%x\n", + arg->vdev_id, ml_params->flags); + + ptr += sizeof(*ml_params); + + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, + arg->ml.num_partner_links * + sizeof(*partner_info)); + ptr += TLV_HDR_SIZE; + + partner_info = ptr; + + for (i = 0; i < arg->ml.num_partner_links; i++) { + partner_info->tlv_header = + ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PARTNER_LINK_PARAMS, + sizeof(*partner_info)); + partner_info->vdev_id = + cpu_to_le32(arg->ml.partner_info[i].vdev_id); + partner_info->hw_link_id = + cpu_to_le32(arg->ml.partner_info[i].hw_link_id); + ether_addr_copy(partner_info->vdev_addr.addr, + arg->ml.partner_info[i].addr); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "partner vdev %d hw_link_id %d macaddr%pM\n", + partner_info->vdev_id, partner_info->hw_link_id, + partner_info->vdev_addr.addr); + + partner_info++; + } + + ptr = partner_info; + } + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n", restart ? 
"restart" : "start", arg->vdev_id, arg->freq, arg->mode); @@ -1073,7 +1185,7 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg, return ret; } -int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, const u8 *bssid) +int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params) { struct ath12k_wmi_pdev *wmi = ar->wmi; struct wmi_vdev_up_cmd *cmd; @@ -1088,14 +1200,20 @@ int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, const u8 *bssid) cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD, sizeof(*cmd)); - cmd->vdev_id = cpu_to_le32(vdev_id); - cmd->vdev_assoc_id = cpu_to_le32(aid); + cmd->vdev_id = cpu_to_le32(params->vdev_id); + cmd->vdev_assoc_id = cpu_to_le32(params->aid); + + ether_addr_copy(cmd->vdev_bssid.addr, params->bssid); - ether_addr_copy(cmd->vdev_bssid.addr, bssid); + if (params->tx_bssid) { + ether_addr_copy(cmd->tx_vdev_bssid.addr, params->tx_bssid); + cmd->nontx_profile_idx = cpu_to_le32(params->nontx_profile_idx); + cmd->nontx_profile_cnt = cpu_to_le32(params->nontx_profile_cnt); + } ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n", - vdev_id, aid, bssid); + params->vdev_id, params->aid, params->bssid); ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID); if (ret) { @@ -1112,9 +1230,14 @@ int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar, struct ath12k_wmi_pdev *wmi = ar->wmi; struct wmi_peer_create_cmd *cmd; struct sk_buff *skb; - int ret; + int ret, len; + struct wmi_peer_create_mlo_params *ml_param; + void *ptr; + struct wmi_tlv *tlv; - skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*ml_param); + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; @@ -1126,9 +1249,23 @@ int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar, cmd->peer_type = cpu_to_le32(arg->peer_type); cmd->vdev_id = cpu_to_le32(arg->vdev_id); + ptr = skb->data + sizeof(*cmd); + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, + sizeof(*ml_param)); + ptr += TLV_HDR_SIZE; + ml_param = ptr; + ml_param->tlv_header = + ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_CREATE_PARAMS, + sizeof(*ml_param)); + if (arg->ml_enabled) + ml_param->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED); + + ptr += sizeof(*ml_param); + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, - "WMI peer create vdev_id %d peer_addr %pM\n", - arg->vdev_id, arg->peer_addr); + "WMI peer create vdev_id %d peer_addr %pM ml_flags 0x%x\n", + arg->vdev_id, arg->peer_addr, ml_param->flags); ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID); if (ret) { @@ -1501,6 +1638,7 @@ int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar, cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST, sizeof(*cmd)); cmd->req_type = cpu_to_le32(type); + cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id); ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI bss chan info req type %d\n", type); @@ -1708,15 +1846,59 @@ int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar, return ret; } +int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id, + const u8 *p2p_ie) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_p2p_go_set_beacon_ie_cmd *cmd; + size_t p2p_ie_len, aligned_len; + struct wmi_tlv *tlv; + struct sk_buff *skb; + void *ptr; + int ret, len; + + p2p_ie_len = p2p_ie[1] + 2; + aligned_len = roundup(p2p_ie_len, sizeof(u32)); + + len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); + 
if (!skb) + return -ENOMEM; + + ptr = skb->data; + cmd = ptr; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_P2P_GO_SET_BEACON_IE, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->ie_buf_len = cpu_to_le32(p2p_ie_len); + + ptr += sizeof(*cmd); + tlv = ptr; + tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE, + aligned_len); + memcpy(tlv->value, p2p_ie, p2p_ie_len); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE); + if (ret) { + ath12k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n"); + dev_kfree_skb(skb); + } + + return ret; +} + int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id, struct ieee80211_mutable_offsets *offs, - struct sk_buff *bcn) + struct sk_buff *bcn, + struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args) { struct ath12k_wmi_pdev *wmi = ar->wmi; struct wmi_bcn_tmpl_cmd *cmd; struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info; struct wmi_tlv *tlv; struct sk_buff *skb; + u32 ema_params = 0; void *ptr; int ret, len; size_t aligned_len = roundup(bcn->len, 4); @@ -1735,6 +1917,16 @@ int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id, cmd->csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[0]); cmd->ext_csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[1]); cmd->buf_len = cpu_to_le32(bcn->len); + cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off); + if (ema_args) { + u32p_replace_bits(&ema_params, ema_args->bcn_cnt, WMI_EMA_BEACON_CNT); + u32p_replace_bits(&ema_params, ema_args->bcn_index, WMI_EMA_BEACON_IDX); + if (ema_args->bcn_index == 0) + u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_FIRST); + if (ema_args->bcn_index + 1 == ema_args->bcn_cnt) + u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_LAST); + cmd->ema_params = cpu_to_le32(ema_params); + } ptr = skb->data + sizeof(*cmd); @@ -1834,7 +2026,7 @@ static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd, if (arg->bw_160) cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ); if (arg->bw_320) - cmd->peer_flags |= cpu_to_le32(WMI_PEER_EXT_320MHZ); + cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ); /* Typically if STBC is enabled for VHT it should be enabled * for HT as well @@ -1909,12 +2101,15 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar, struct ath12k_wmi_vht_rate_set_params *mcs; struct ath12k_wmi_he_rate_set_params *he_mcs; struct ath12k_wmi_eht_rate_set_params *eht_mcs; + struct wmi_peer_assoc_mlo_params *ml_params; + struct wmi_peer_assoc_mlo_partner_info_params *partner_info; struct sk_buff *skb; struct wmi_tlv *tlv; void *ptr; u32 peer_legacy_rates_align; u32 peer_ht_rates_align; int i, ret, len; + __le32 v; peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates, sizeof(u32)); @@ -1926,8 +2121,13 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar, TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) + sizeof(*mcs) + TLV_HDR_SIZE + (sizeof(*he_mcs) * arg->peer_he_mcs_count) + - TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count) + - TLV_HDR_SIZE + TLV_HDR_SIZE; + TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count); + + if (arg->ml.enabled) + len += TLV_HDR_SIZE + sizeof(*ml_params) + + TLV_HDR_SIZE + (arg->ml.num_partner_links * sizeof(*partner_info)); + else + len += (2 * TLV_HDR_SIZE); skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) @@ -2051,12 +2251,38 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar, ptr += sizeof(*he_mcs); } - /* MLO header tag with 0 length */ - len = 0; tlv = ptr; + len = arg->ml.enabled ? 
sizeof(*ml_params) : 0; tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len); ptr += TLV_HDR_SIZE; + if (!len) + goto skip_ml_params; + + ml_params = ptr; + ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_ASSOC_PARAMS, + len); + ml_params->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED); + + if (arg->ml.assoc_link) + ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK); + + if (arg->ml.primary_umac) + ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC); + + if (arg->ml.logical_link_idx_valid) + ml_params->flags |= + cpu_to_le32(ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID); + + if (arg->ml.peer_id_valid) + ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PEER_ID_VALID); + + ether_addr_copy(ml_params->mld_addr.addr, arg->ml.mld_addr); + ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx); + ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id); + ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id); + ptr += sizeof(*ml_params); +skip_ml_params: /* Loop through the EHT rate set */ len = arg->peer_eht_mcs_count * sizeof(*eht_mcs); tlv = ptr; @@ -2073,12 +2299,45 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar, ptr += sizeof(*eht_mcs); } - /* ML partner links tag with 0 length */ - len = 0; tlv = ptr; + len = arg->ml.enabled ? arg->ml.num_partner_links * sizeof(*partner_info) : 0; + /* fill ML Partner links */ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len); ptr += TLV_HDR_SIZE; + if (len == 0) + goto send; + + for (i = 0; i < arg->ml.num_partner_links; i++) { + u32 cmd = WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC; + + partner_info = ptr; + partner_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(cmd, + sizeof(*partner_info)); + partner_info->vdev_id = cpu_to_le32(arg->ml.partner_info[i].vdev_id); + partner_info->hw_link_id = + cpu_to_le32(arg->ml.partner_info[i].hw_link_id); + partner_info->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED); + + if (arg->ml.partner_info[i].assoc_link) + partner_info->flags |= + cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK); + + if (arg->ml.partner_info[i].primary_umac) + partner_info->flags |= + cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC); + + if (arg->ml.partner_info[i].logical_link_idx_valid) { + v = cpu_to_le32(ATH12K_WMI_FLAG_MLO_LINK_ID_VALID); + partner_info->flags |= v; + } + + partner_info->logical_link_idx = + cpu_to_le32(arg->ml.partner_info[i].logical_link_idx); + ptr += sizeof(*partner_info); + } + +send: ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x\n", cmd->vdev_id, cmd->peer_associd, arg->peer_mac, @@ -2128,7 +2387,7 @@ void ath12k_wmi_start_scan_init(struct ath12k *ar, WMI_SCAN_EVENT_BSS_CHANNEL | WMI_SCAN_EVENT_FOREIGN_CHAN | WMI_SCAN_EVENT_DEQUEUED; - arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT; + arg->scan_f_chan_stat_evnt = 1; arg->num_bssid = 1; /* fill bssid_list[0] with 0xff, otherwise bssid and RA will be @@ -2239,12 +2498,6 @@ int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar, if (arg->num_bssid) len += sizeof(*bssid) * arg->num_bssid; - len += TLV_HDR_SIZE; - if (arg->extraie.len) - extraie_len_with_pad = - roundup(arg->extraie.len, sizeof(u32)); - len += extraie_len_with_pad; - if (arg->num_hint_bssid) len += TLV_HDR_SIZE + 
arg->num_hint_bssid * sizeof(*hint_bssid); @@ -2253,6 +2506,18 @@ int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar, len += TLV_HDR_SIZE + arg->num_hint_s_ssid * sizeof(*s_ssid); + len += TLV_HDR_SIZE; + if (arg->extraie.len) + extraie_len_with_pad = + roundup(arg->extraie.len, sizeof(u32)); + if (extraie_len_with_pad <= (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len)) { + len += extraie_len_with_pad; + } else { + ath12k_warn(ar->ab, "discard large size %d bytes extraie for scan start\n", + arg->extraie.len); + extraie_len_with_pad = 0; + } + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; @@ -2342,7 +2607,7 @@ int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar, tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len); ptr += TLV_HDR_SIZE; - if (arg->extraie.len) + if (extraie_len_with_pad) memcpy(ptr, arg->extraie.ptr, arg->extraie.len); @@ -2651,6 +2916,149 @@ int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar, return ret; } +int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id, + const u8 *buf, size_t buf_len) +{ + struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab; + struct wmi_pdev_set_bios_interface_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + u8 *ptr; + u32 len, len_aligned; + int ret; + + len_aligned = roundup(buf_len, sizeof(u32)); + len = sizeof(*cmd) + TLV_HDR_SIZE + len_aligned; + + skb = ath12k_wmi_alloc_skb(wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_pdev_set_bios_interface_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD, + sizeof(*cmd)); + cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC); + cmd->param_type_id = cpu_to_le32(param_id); + cmd->length = cpu_to_le32(buf_len); + + ptr = skb->data + sizeof(*cmd); + tlv = (struct wmi_tlv *)ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len_aligned); + ptr += TLV_HDR_SIZE; + memcpy(ptr, buf, buf_len); + + ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], + skb, + WMI_PDEV_SET_BIOS_INTERFACE_CMDID); + if (ret) { + ath12k_warn(ab, + "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID parameter id %d: %d\n", + param_id, ret); + dev_kfree_skb(skb); + } + + return 0; +} + +int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table) +{ + struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab; + struct wmi_pdev_set_bios_sar_table_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + int ret; + u8 *buf_ptr; + u32 len, sar_table_len_aligned, sar_dbs_backoff_len_aligned; + const u8 *psar_value = psar_table + ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET; + const u8 *pdbs_value = psar_table + ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET; + + sar_table_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_TABLE_LEN, sizeof(u32)); + sar_dbs_backoff_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN, + sizeof(u32)); + len = sizeof(*cmd) + TLV_HDR_SIZE + sar_table_len_aligned + + TLV_HDR_SIZE + sar_dbs_backoff_len_aligned; + + skb = ath12k_wmi_alloc_skb(wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_pdev_set_bios_sar_table_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD, + sizeof(*cmd)); + cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC); + cmd->sar_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_TABLE_LEN); + cmd->dbs_backoff_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN); + + buf_ptr = skb->data + sizeof(*cmd); + tlv = (struct wmi_tlv *)buf_ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, + sar_table_len_aligned); + buf_ptr += TLV_HDR_SIZE; + 
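/* Editorial note: the byte-array TLVs in these BIOS SAR/geo commands
 * advertise the u32-aligned length (roundup(..., sizeof(u32))) in the
 * TLV header, while the memcpy below copies only the unpadded table.
 * No explicit memset of the tail is done, which appears to rely on
 * ath12k_wmi_alloc_skb() returning a zeroed buffer (it already rounds
 * the requested length up to 4 bytes itself). The same aligned-header/
 * unpadded-copy pairing is used in ath12k_wmi_set_bios_cmd() above and
 * in the geo-table command below.
 */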
memcpy(buf_ptr, psar_value, ATH12K_ACPI_BIOS_SAR_TABLE_LEN); + + buf_ptr += sar_table_len_aligned; + tlv = (struct wmi_tlv *)buf_ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, + sar_dbs_backoff_len_aligned); + buf_ptr += TLV_HDR_SIZE; + memcpy(buf_ptr, pdbs_value, ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN); + + ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], + skb, + WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID); + if (ret) { + ath12k_warn(ab, + "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID %d\n", + ret); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table) +{ + struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab; + struct wmi_pdev_set_bios_geo_table_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + int ret; + u8 *buf_ptr; + u32 len, sar_geo_len_aligned; + const u8 *pgeo_value = pgeo_table + ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET; + + sar_geo_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN, sizeof(u32)); + len = sizeof(*cmd) + TLV_HDR_SIZE + sar_geo_len_aligned; + + skb = ath12k_wmi_alloc_skb(wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_pdev_set_bios_geo_table_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD, + sizeof(*cmd)); + cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC); + cmd->geo_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN); + + buf_ptr = skb->data + sizeof(*cmd); + tlv = (struct wmi_tlv *)buf_ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, sar_geo_len_aligned); + buf_ptr += TLV_HDR_SIZE; + memcpy(buf_ptr, pgeo_value, ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN); + + ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], + skb, + WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID); + if (ret) { + ath12k_warn(ab, + "failed to send WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID %d\n", + ret); + dev_kfree_skb(skb); + } + + return ret; +} + int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac, u32 tid, u32 initiator, u32 reason) { @@ -3252,13 +3660,19 @@ ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cf wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size); wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters); wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id); - wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config); + wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config | + WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64); wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version); wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params); wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count); wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count); + wmi_cfg->flags2 = le32_encode_bits(tg_cfg->peer_metadata_ver, + WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION); wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported << WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT); + wmi_cfg->ema_max_vap_cnt = cpu_to_le32(tg_cfg->ema_max_vap_cnt); + wmi_cfg->ema_max_profile_period = cpu_to_le32(tg_cfg->ema_max_profile_period); + wmi_cfg->flags2 |= cpu_to_le32(WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET); } static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi, @@ -3463,7 +3877,7 @@ int ath12k_wmi_set_hw_mode(struct ath12k_base *ab, int ath12k_wmi_cmd_init(struct ath12k_base *ab) { - struct ath12k_wmi_base *wmi_sc = &ab->wmi_ab; + struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab; struct ath12k_wmi_init_cmd_arg arg = {}; if 
(test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT, @@ -3471,10 +3885,11 @@ int ath12k_wmi_cmd_init(struct ath12k_base *ab) arg.res_cfg.is_reg_cc_ext_event_supported = true; ab->hw_params->wmi_init(ab, &arg.res_cfg); + ab->wow.wmi_conf_rx_decap_mode = arg.res_cfg.rx_decap_mode; - arg.num_mem_chunks = wmi_sc->num_mem_chunks; - arg.hw_mode_id = wmi_sc->preferred_hw_mode; - arg.mem_chunks = wmi_sc->mem_chunks; + arg.num_mem_chunks = wmi_ab->num_mem_chunks; + arg.hw_mode_id = wmi_ab->preferred_hw_mode; + arg.mem_chunks = wmi_ab->mem_chunks; if (ab->hw_params->single_pdev_only) arg.hw_mode_id = WMI_HOST_HW_MODE_MAX; @@ -3482,7 +3897,9 @@ int ath12k_wmi_cmd_init(struct ath12k_base *ab) arg.num_band_to_mac = ab->num_radios; ath12k_fill_band_to_mac_param(ab, arg.band_to_mac); - return ath12k_init_cmd_send(&wmi_sc->wmi[0], &arg); + ab->dp.peer_metadata_ver = arg.res_cfg.peer_metadata_ver; + + return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg); } int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar, @@ -3589,7 +4006,7 @@ int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar, cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ, sizeof(*cmd)); - cmd->pdev_id = cpu_to_le32(DP_SW2HW_MACID(arg->pdev_id)); + cmd->pdev_id = cpu_to_le32(arg->pdev_id); cmd->module_id = cpu_to_le32(arg->module_id); cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo); cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi); @@ -3870,6 +4287,12 @@ static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc, ath12k_warn(soc, "failed to extract reg cap %d\n", i); return ret; } + + if (reg_cap.phy_id >= MAX_RADIOS) { + ath12k_warn(soc, "unexpected phy id %u\n", reg_cap.phy_id); + return -EINVAL; + } + soc->hal_reg_cap[reg_cap.phy_id] = reg_cap; } return 0; @@ -3960,6 +4383,7 @@ static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab) { kfree(ab->db_caps); ab->db_caps = NULL; + ab->num_db_cap = 0; } static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab, @@ -4147,14 +4571,22 @@ static void ath12k_wmi_eht_caps_parse(struct ath12k_pdev *pdev, u32 band, __le32 cap_info_internal) { struct ath12k_band_cap *cap_band = &pdev->cap.band[band]; + u32 support_320mhz; u8 i; + if (band == NL80211_BAND_6GHZ) + support_320mhz = cap_band->eht_cap_phy_info[0] & + IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ; + for (i = 0; i < WMI_MAX_EHTCAP_MAC_SIZE; i++) cap_band->eht_cap_mac_info[i] = le32_to_cpu(cap_mac_info[i]); for (i = 0; i < WMI_MAX_EHTCAP_PHY_SIZE; i++) cap_band->eht_cap_phy_info[i] = le32_to_cpu(cap_phy_info[i]); + if (band == NL80211_BAND_6GHZ) + cap_band->eht_cap_phy_info[0] |= support_320mhz; + cap_band->eht_mcs_20_only = le32_to_cpu(supp_mcs[0]); cap_band->eht_mcs_80 = le32_to_cpu(supp_mcs[1]); if (band != NL80211_BAND_2GHZ) { @@ -4176,14 +4608,23 @@ ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab, const struct ath12k_wmi_caps_ext_params *caps, struct ath12k_pdev *pdev) { - u32 bands; + struct ath12k_band_cap *cap_band; + u32 bands, support_320mhz; int i; if (ab->hw_params->single_pdev_only) { + if (caps->hw_mode_id == WMI_HOST_HW_MODE_SINGLE) { + support_320mhz = le32_to_cpu(caps->eht_cap_phy_info_5ghz[0]) & + IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ; + cap_band = &pdev->cap.band[NL80211_BAND_6GHZ]; + cap_band->eht_cap_phy_info[0] |= support_320mhz; + return 0; + } + for (i = 0; i < ab->fw_pdev_count; i++) { struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i]; - if (fw_pdev->pdev_id == le32_to_cpu(caps->pdev_id) && + if (fw_pdev->pdev_id == ath12k_wmi_caps_ext_get_pdev_id(caps) && fw_pdev->phy_id == 
le32_to_cpu(caps->phy_id)) { bands = fw_pdev->supported_bands; break; @@ -4221,6 +4662,9 @@ ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab, caps->eht_cap_info_internal); } + pdev->cap.eml_cap = le32_to_cpu(caps->eml_capability); + pdev->cap.mld_cap = le32_to_cpu(caps->mld_capability); + return 0; } @@ -4235,11 +4679,13 @@ static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag, return -EPROTO; if (ab->hw_params->single_pdev_only) { - if (ab->wmi_ab.preferred_hw_mode != le32_to_cpu(caps->hw_mode_id)) + if (ab->wmi_ab.preferred_hw_mode != le32_to_cpu(caps->hw_mode_id) && + caps->hw_mode_id != WMI_HOST_HW_MODE_SINGLE) return 0; } else { for (i = 0; i < ab->num_radios; i++) { - if (ab->pdevs[i].pdev_id == le32_to_cpu(caps->pdev_id)) + if (ab->pdevs[i].pdev_id == + ath12k_wmi_caps_ext_get_pdev_id(caps)) break; } @@ -4342,7 +4788,7 @@ static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buf const struct wmi_vdev_start_resp_event *ev; int ret; - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath12k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -4405,6 +4851,22 @@ static struct ath12k_reg_rule return reg_rule_ptr; } +static u8 ath12k_wmi_ignore_num_extra_rules(struct ath12k_wmi_reg_rule_ext_params *rule, + u32 num_reg_rules) +{ + u8 num_invalid_5ghz_rules = 0; + u32 count, start_freq; + + for (count = 0; count < num_reg_rules; count++) { + start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ); + + if (start_freq >= ATH12K_MIN_6G_FREQ) + num_invalid_5ghz_rules++; + } + + return num_invalid_5ghz_rules; +} + static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab, struct sk_buff *skb, struct ath12k_reg_info *reg_info) @@ -4415,12 +4877,13 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab, u32 num_2g_reg_rules, num_5g_reg_rules; u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE]; u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE]; + u8 num_invalid_5ghz_ext_rules; u32 total_reg_rules = 0; int ret, i, j; ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n"); - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath12k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -4508,20 +4971,6 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab, memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN); - /* FIXME: Currently FW includes 6G reg rule also in 5G rule - * list for country US. - * Having same 6G reg rule in 5G and 6G rules list causes - * intersect check to be true, and same rules will be shown - * multiple times in iw cmd. So added hack below to avoid - * parsing 6G rule from 5G reg rule list, and this can be - * removed later, after FW updates to remove 6G reg rule - * from 5G rules list. 
- */ - if (memcmp(reg_info->alpha2, "US", 2) == 0) { - reg_info->num_5g_reg_rules = REG_US_5G_NUM_REG_RULES; - num_5g_reg_rules = reg_info->num_5g_reg_rules; - } - reg_info->dfs_region = le32_to_cpu(ev->dfs_region); reg_info->phybitmap = le32_to_cpu(ev->phybitmap); reg_info->num_phy = le32_to_cpu(ev->num_phy); @@ -4579,10 +5028,11 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab, } ath12k_dbg(ab, ATH12K_DBG_WMI, - "%s:cc_ext %s dsf %d BW: min_2g %d max_2g %d min_5g %d max_5g %d", + "%s:cc_ext %s dfs %d BW: min_2g %d max_2g %d min_5g %d max_5g %d phy_bitmap 0x%x", __func__, reg_info->alpha2, reg_info->dfs_region, reg_info->min_bw_2g, reg_info->max_bw_2g, - reg_info->min_bw_5g, reg_info->max_bw_5g); + reg_info->min_bw_5g, reg_info->max_bw_5g, + reg_info->phybitmap); ath12k_dbg(ab, ATH12K_DBG_WMI, "num_2g_reg_rules %d num_5g_reg_rules %d", @@ -4623,8 +5073,29 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab, } } + ext_wmi_reg_rule += num_2g_reg_rules; + + /* Firmware might include 6 GHz reg rule in 5 GHz rule list + * for few countries along with separate 6 GHz rule. + * Having same 6 GHz reg rule in 5 GHz and 6 GHz rules list + * causes intersect check to be true, and same rules will be + * shown multiple times in iw cmd. + * Hence, avoid parsing 6 GHz rule from 5 GHz reg rule list + */ + num_invalid_5ghz_ext_rules = ath12k_wmi_ignore_num_extra_rules(ext_wmi_reg_rule, + num_5g_reg_rules); + + if (num_invalid_5ghz_ext_rules) { + ath12k_dbg(ab, ATH12K_DBG_WMI, + "CC: %s 5 GHz reg rules number %d from fw, %d number of invalid 5 GHz rules", + reg_info->alpha2, reg_info->num_5g_reg_rules, + num_invalid_5ghz_ext_rules); + + num_5g_reg_rules = num_5g_reg_rules - num_invalid_5ghz_ext_rules; + reg_info->num_5g_reg_rules = num_5g_reg_rules; + } + if (num_5g_reg_rules) { - ext_wmi_reg_rule += num_2g_reg_rules; reg_info->reg_rules_5g_ptr = create_ext_reg_rules_from_wmi(num_5g_reg_rules, ext_wmi_reg_rule); @@ -4636,7 +5107,12 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab, } } - ext_wmi_reg_rule += num_5g_reg_rules; + /* We have adjusted the number of 5 GHz reg rules above. But still those + * many rules needs to be adjusted in ext_wmi_reg_rule. + * + * NOTE: num_invalid_5ghz_ext_rules will be 0 for rest other cases. 
+ */ + ext_wmi_reg_rule += (num_5g_reg_rules + num_invalid_5ghz_ext_rules); for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) { reg_info->reg_rules_6g_ap_ptr[i] = @@ -4705,7 +5181,7 @@ static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff * const struct wmi_peer_delete_resp_event *ev; int ret; - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath12k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -4737,7 +5213,7 @@ static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab, const struct wmi_vdev_delete_resp_event *ev; int ret; - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath12k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -4757,15 +5233,15 @@ static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab, return 0; } -static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab, void *evt_buf, - u32 len, u32 *vdev_id, - u32 *tx_status) +static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab, + struct sk_buff *skb, + u32 *vdev_id, u32 *tx_status) { const void **tb; const struct wmi_bcn_tx_status_event *ev; int ret; - tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC); + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath12k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -4793,7 +5269,7 @@ static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_ const struct wmi_vdev_stopped_event *ev; int ret; - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath12k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -4915,7 +5391,10 @@ static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id, if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status) info->flags |= IEEE80211_TX_STAT_ACK; - ieee80211_tx_status_irqsafe(ar->hw, msdu); + if ((info->flags & IEEE80211_TX_CTL_NO_ACK) && !status) + info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; + + ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu); num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx); @@ -4937,7 +5416,7 @@ static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab, const struct wmi_mgmt_tx_compl_event *ev; int ret; - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath12k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -4973,6 +5452,10 @@ static void ath12k_wmi_event_scan_started(struct ath12k *ar) break; case ATH12K_SCAN_STARTING: ar->scan.state = ATH12K_SCAN_RUNNING; + + if (ar->scan.is_roc) + ieee80211_ready_on_channel(ath12k_ar_to_hw(ar)); + complete(&ar->scan.started); break; } @@ -5043,6 +5526,8 @@ static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar) static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq) { + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); + lockdep_assert_held(&ar->data_lock); switch (ar->scan.state) { @@ -5054,7 +5539,11 @@ static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq) break; case ATH12K_SCAN_RUNNING: case ATH12K_SCAN_ABORTING: - ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq); + ar->scan_channel = ieee80211_get_channel(hw->wiphy, freq); + + if 
(ar->scan.is_roc && ar->scan.roc_freq == freq) + complete(&ar->scan.on_channel); + break; } } @@ -5108,7 +5597,7 @@ static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb, const struct wmi_scan_event *ev; int ret; - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath12k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -5141,7 +5630,7 @@ static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buf const struct wmi_peer_sta_kickout_event *ev; int ret; - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath12k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -5168,7 +5657,7 @@ static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb, const struct wmi_roam_event *ev; int ret; - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath12k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -5193,13 +5682,14 @@ static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb, static int freq_to_idx(struct ath12k *ar, int freq) { struct ieee80211_supported_band *sband; + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); int band, ch, idx = 0; for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { if (!ar->mac.sbands[band].channels) continue; - sband = ar->hw->wiphy->bands[band]; + sband = hw->wiphy->bands[band]; if (!sband) continue; @@ -5212,14 +5702,14 @@ exit: return idx; } -static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, u8 *evt_buf, - u32 len, struct wmi_chan_info_event *ch_info_ev) +static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb, + struct wmi_chan_info_event *ch_info_ev) { const void **tb; const struct wmi_chan_info_event *ev; int ret; - tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC); + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath12k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -5258,7 +5748,7 @@ ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb, const struct wmi_pdev_bss_chan_info_event *ev; int ret; - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath12k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -5298,7 +5788,7 @@ ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *sk const struct wmi_vdev_install_key_compl_event *ev; int ret; - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath12k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -5329,7 +5819,7 @@ static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff const struct wmi_peer_assoc_conf_event *ev; int ret; - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath12k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -5351,13 +5841,13 @@ static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff } static int -ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, u8 *evt_buf, - u32 len, const struct 
wmi_pdev_temperature_event *ev) +ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, struct sk_buff *skb, + const struct wmi_pdev_temperature_event *ev) { const void **tb; int ret; - tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC); + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath12k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -5389,7 +5879,13 @@ static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab, static bool ath12k_reg_is_world_alpha(char *alpha) { - return alpha[0] == '0' && alpha[1] == '0'; + if (alpha[0] == '0' && alpha[1] == '0') + return true; + + if (alpha[0] == 'n' && alpha[1] == 'a') + return true; + + return false; } static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb) @@ -5430,7 +5926,7 @@ static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *sk * event. Otherwise, it goes to fallback. */ if (ab->hw_params->single_pdev_only && - pdev_idx < ab->hw_params->num_rxmda_per_pdev) + pdev_idx < ab->hw_params->num_rxdma_per_pdev) goto mem_free; else goto fallback; @@ -5686,8 +6182,7 @@ static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *s { u32 vdev_id, tx_status; - if (ath12k_pull_bcn_tx_status_ev(ab, skb->data, skb->len, - &vdev_id, &tx_status) != 0) { + if (ath12k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) { ath12k_warn(ab, "failed to extract bcn tx status"); return; } @@ -5749,7 +6244,7 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb) goto exit; } - if ((test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) || + if ((test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) || (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT | WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) { @@ -5760,8 +6255,10 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb) if (rx_ev.status & WMI_RX_STATUS_ERR_MIC) status->flag |= RX_FLAG_MMIC_ERROR; - if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ) { + if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ && + rx_ev.chan_freq <= ATH12K_MAX_6G_FREQ) { status->band = NL80211_BAND_6GHZ; + status->freq = rx_ev.chan_freq; } else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) { status->band = NL80211_BAND_2GHZ; } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) { @@ -5782,8 +6279,10 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb) sband = &ar->mac.sbands[status->band]; - status->freq = ieee80211_channel_to_frequency(rx_ev.channel, - status->band); + if (status->band != NL80211_BAND_6GHZ) + status->freq = ieee80211_channel_to_frequency(rx_ev.channel, + status->band); + status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR; status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100); @@ -5810,13 +6309,11 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb) } } - /* TODO: Pending handle beacon implementation - *if (ieee80211_is_beacon(hdr->frame_control)) - * ath12k_mac_handle_beacon(ar, skb); - */ + if (ieee80211_is_beacon(hdr->frame_control)) + ath12k_mac_handle_beacon(ar, skb); ath12k_dbg(ab, ATH12K_DBG_MGMT, - "event mgmt rx skb %pK len %d ftype %02x stype %02x\n", + "event mgmt rx skb %p len %d ftype %02x stype %02x\n", skb, skb->len, fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); @@ -5825,7 +6322,7 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb) status->freq, status->band, status->signal, status->rate_idx); - ieee80211_rx_ni(ar->hw, 
skb); + ieee80211_rx_ni(ath12k_ar_to_hw(ar), skb); exit: rcu_read_unlock(); @@ -5861,8 +6358,9 @@ exit: rcu_read_unlock(); } -static struct ath12k *ath12k_get_ar_on_scan_abort(struct ath12k_base *ab, - u32 vdev_id) +static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab, + u32 vdev_id, + enum ath12k_scan_state state) { int i; struct ath12k_pdev *pdev; @@ -5874,8 +6372,9 @@ static struct ath12k *ath12k_get_ar_on_scan_abort(struct ath12k_base *ab, ar = pdev->ar; spin_lock_bh(&ar->data_lock); - if (ar->scan.state == ATH12K_SCAN_ABORTING && - ar->scan.vdev_id == vdev_id) { + if (ar->scan.state == state && + ar->scan.arvif && + ar->scan.arvif->vdev_id == vdev_id) { spin_unlock_bh(&ar->data_lock); return ar; } @@ -5904,10 +6403,15 @@ static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb) * aborting scan's vdev id matches this event info. */ if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED && - le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) - ar = ath12k_get_ar_on_scan_abort(ab, le32_to_cpu(scan_ev.vdev_id)); - else + le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) { + ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id), + ATH12K_SCAN_ABORTING); + if (!ar) + ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id), + ATH12K_SCAN_RUNNING); + } else { ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id)); + } if (!ar) { ath12k_warn(ab, "Received scan event for unknown vdev"); @@ -5992,7 +6496,7 @@ static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff goto exit; } - sta = ieee80211_find_sta_by_ifaddr(ar->hw, + sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar), arg.mac_addr, NULL); if (!sta) { ath12k_warn(ab, "Spurious quick kickout for STA %pM\n", @@ -6014,42 +6518,44 @@ static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb) { struct wmi_roam_event roam_ev = {}; struct ath12k *ar; + u32 vdev_id; + u8 roam_reason; if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) { ath12k_warn(ab, "failed to extract roam event"); return; } + vdev_id = le32_to_cpu(roam_ev.vdev_id); + roam_reason = u32_get_bits(le32_to_cpu(roam_ev.reason), + WMI_ROAM_REASON_MASK); + ath12k_dbg(ab, ATH12K_DBG_WMI, - "wmi roam event vdev %u reason 0x%08x rssi %d\n", - roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi); + "wmi roam event vdev %u reason %d rssi %d\n", + vdev_id, roam_reason, roam_ev.rssi); rcu_read_lock(); - ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(roam_ev.vdev_id)); + ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id); if (!ar) { - ath12k_warn(ab, "invalid vdev id in roam ev %d", - roam_ev.vdev_id); + ath12k_warn(ab, "invalid vdev id in roam ev %d", vdev_id); rcu_read_unlock(); return; } - if (le32_to_cpu(roam_ev.reason) >= WMI_ROAM_REASON_MAX) + if (roam_reason >= WMI_ROAM_REASON_MAX) ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n", - roam_ev.reason, roam_ev.vdev_id); + roam_reason, vdev_id); - switch (le32_to_cpu(roam_ev.reason)) { + switch (roam_reason) { case WMI_ROAM_REASON_BEACON_MISS: - /* TODO: Pending beacon miss and connection_loss_work - * implementation - * ath12k_mac_handle_beacon_miss(ar, vdev_id); - */ + ath12k_mac_handle_beacon_miss(ar, vdev_id); break; case WMI_ROAM_REASON_BETTER_AP: case WMI_ROAM_REASON_LOW_RSSI: case WMI_ROAM_REASON_SUITABLE_AP_FOUND: case WMI_ROAM_REASON_HO_FAILED: ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n", - roam_ev.reason, roam_ev.vdev_id); + roam_reason, vdev_id); 
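/* Editorial note: the roam handler now extracts the reason exactly once
 * up front, via u32_get_bits(le32_to_cpu(roam_ev.reason),
 * WMI_ROAM_REASON_MASK), since the event word evidently carries more
 * than the bare reason; switching on the raw 32-bit value could
 * otherwise trip the WMI_ROAM_REASON_MAX check even for known reasons.
 * The beacon-miss case is also wired to ath12k_mac_handle_beacon_miss()
 * in place of the old TODO stub. A hypothetical userspace equivalent of
 * the masked decode, matching u32_get_bits()'s contract for a
 * contiguous mask:
 */
#include <stdint.h>

static inline uint32_t get_bits(uint32_t word, uint32_t mask)
{
	/* divide by the lowest set bit == shift the field down to bit 0 */
	return (word & mask) / (mask & -mask);
}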
break; } @@ -6065,7 +6571,7 @@ static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb) /* HW channel counters frequency value in hertz */ u32 cc_freq_hz = ab->cc_freq_hz; - if (ath12k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) { + if (ath12k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) { ath12k_warn(ab, "failed to extract chan info event"); return; } @@ -6350,7 +6856,7 @@ static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab, const struct wmi_pdev_ctl_failsafe_chk_event *ev; int ret; - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath12k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -6384,7 +6890,9 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab, const u32 *vdev_ids) { int i; - struct ath12k_vif *arvif; + struct ieee80211_bss_conf *conf; + struct ath12k_link_vif *arvif; + struct ath12k_vif *ahvif; /* Finish CSA once the switch count becomes NULL */ if (ev->current_switch_count) @@ -6399,9 +6907,23 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab, vdev_ids[i]); continue; } + ahvif = arvif->ahvif; + + if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) { + ath12k_warn(ab, "Invalid CSA switch count even link id: %d\n", + arvif->link_id); + continue; + } + + conf = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]); + if (!conf) { + ath12k_warn(ab, "unable to access bss link conf in process csa for vif %pM link %u\n", + ahvif->vif->addr, arvif->link_id); + continue; + } - if (arvif->is_up && arvif->vif->bss_conf.csa_active) - ieee80211_csa_finish(arvif->vif); + if (arvif->is_up && conf->csa_active) + ieee80211_csa_finish(ahvif->vif, 0); } rcu_read_unlock(); } @@ -6415,7 +6937,7 @@ ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab, const u32 *vdev_ids; int ret; - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath12k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -6445,11 +6967,12 @@ static void ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb) { const void **tb; + struct ath12k_mac_get_any_chanctx_conf_arg arg; const struct ath12k_wmi_pdev_radar_event *ev; struct ath12k *ar; int ret; - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath12k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -6470,6 +6993,8 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp, ev->freq_offset, ev->sidx); + rcu_read_lock(); + ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id)); if (!ar) { @@ -6478,15 +7003,26 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff goto exit; } + arg.ar = ar; + arg.chanctx_conf = NULL; + ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar), + ath12k_mac_get_any_chanctx_conf_iter, &arg); + if (!arg.chanctx_conf) { + ath12k_warn(ab, "failed to find valid chanctx_conf in radar detected event\n"); + goto exit; + } + ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n", ev->pdev_id); if (ar->dfs_block_radar_events) ath12k_info(ab, "DFS Radar detected, but ignored as requested\n"); else - ieee80211_radar_detected(ar->hw); + 
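/* Editorial note: ieee80211_radar_detected() now takes the channel
 * context the radar was detected on, which is why this handler first
 * resolves one via ieee80211_iter_chan_contexts_atomic() with
 * ath12k_mac_get_any_chanctx_conf_iter() (under rcu_read_lock()) and
 * bails out early when no context is found.
 */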
ieee80211_radar_detected(ath12k_ar_to_hw(ar), arg.chanctx_conf); exit: + rcu_read_unlock(); + kfree(tb); } @@ -6497,7 +7033,7 @@ ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab, struct ath12k *ar; struct wmi_pdev_temperature_event ev = {0}; - if (ath12k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) { + if (ath12k_pull_pdev_temp_ev(ab, skb, &ev) != 0) { ath12k_warn(ab, "failed to extract pdev temperature event"); return; } @@ -6505,11 +7041,16 @@ ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab, ath12k_dbg(ab, ATH12K_DBG_WMI, "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id); + rcu_read_lock(); + ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id)); if (!ar) { ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id); - return; + goto exit; } + +exit: + rcu_read_unlock(); } static void ath12k_fils_discovery_event(struct ath12k_base *ab, @@ -6519,7 +7060,7 @@ static void ath12k_fils_discovery_event(struct ath12k_base *ab, const struct wmi_fils_discovery_event *ev; int ret; - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath12k_warn(ab, @@ -6549,7 +7090,7 @@ static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab, const struct wmi_probe_resp_tx_status_event *ev; int ret; - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath12k_warn(ab, @@ -6574,6 +7115,337 @@ static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab, kfree(tb); } +static int ath12k_wmi_p2p_noa_event(struct ath12k_base *ab, + struct sk_buff *skb) +{ + const void **tb; + const struct wmi_p2p_noa_event *ev; + const struct ath12k_wmi_p2p_noa_info *noa; + struct ath12k *ar; + int ret, vdev_id; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse P2P NoA TLV: %d\n", ret); + return ret; + } + + ev = tb[WMI_TAG_P2P_NOA_EVENT]; + noa = tb[WMI_TAG_P2P_NOA_INFO]; + + if (!ev || !noa) { + ret = -EPROTO; + goto out; + } + + vdev_id = __le32_to_cpu(ev->vdev_id); + + ath12k_dbg(ab, ATH12K_DBG_WMI, + "wmi tlv p2p noa vdev_id %i descriptors %u\n", + vdev_id, le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM)); + + rcu_read_lock(); + ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id); + if (!ar) { + ath12k_warn(ab, "invalid vdev id %d in P2P NoA event\n", + vdev_id); + ret = -EINVAL; + goto unlock; + } + + ath12k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa); + + ret = 0; + +unlock: + rcu_read_unlock(); +out: + kfree(tb); + return ret; +} + +static void ath12k_rfkill_state_change_event(struct ath12k_base *ab, + struct sk_buff *skb) +{ + const struct wmi_rfkill_state_change_event *ev; + const void **tb; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return; + } + + ev = tb[WMI_TAG_RFKILL_EVENT]; + if (!ev) { + kfree(tb); + return; + } + + ath12k_dbg(ab, ATH12K_DBG_MAC, + "wmi tlv rfkill state change gpio %d type %d radio_state %d\n", + le32_to_cpu(ev->gpio_pin_num), + le32_to_cpu(ev->int_type), + le32_to_cpu(ev->radio_state)); + + spin_lock_bh(&ab->base_lock); + ab->rfkill_radio_on = (ev->radio_state == cpu_to_le32(WMI_RFKILL_RADIO_STATE_ON)); + spin_unlock_bh(&ab->base_lock); + + queue_work(ab->workqueue, &ab->rfkill_work); + 
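/* Editorial note: the rfkill handler runs from the WMI rx path, so it
 * only snapshots the new radio state under base_lock here and defers
 * the actual (potentially sleeping) rfkill handling to ab->rfkill_work;
 * that work item is assumed to be initialized in core.c alongside the
 * other ab->workqueue users.
 */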
kfree(tb); +} + +static void +ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb) +{ + trace_ath12k_wmi_diag(ab, skb->data, skb->len); +} + +static void ath12k_wmi_twt_enable_event(struct ath12k_base *ab, + struct sk_buff *skb) +{ + const void **tb; + const struct wmi_twt_enable_event *ev; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse wmi twt enable status event tlv: %d\n", + ret); + return; + } + + ev = tb[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT]; + if (!ev) { + ath12k_warn(ab, "failed to fetch twt enable wmi event\n"); + goto exit; + } + + ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt enable event pdev id %u status %u\n", + le32_to_cpu(ev->pdev_id), + le32_to_cpu(ev->status)); + +exit: + kfree(tb); +} + +static void ath12k_wmi_twt_disable_event(struct ath12k_base *ab, + struct sk_buff *skb) +{ + const void **tb; + const struct wmi_twt_disable_event *ev; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse wmi twt disable status event tlv: %d\n", + ret); + return; + } + + ev = tb[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT]; + if (!ev) { + ath12k_warn(ab, "failed to fetch twt disable wmi event\n"); + goto exit; + } + + ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt disable event pdev id %d status %u\n", + le32_to_cpu(ev->pdev_id), + le32_to_cpu(ev->status)); + +exit: + kfree(tb); +} + +static int ath12k_wmi_wow_wakeup_host_parse(struct ath12k_base *ab, + u16 tag, u16 len, + const void *ptr, void *data) +{ + const struct wmi_wow_ev_pg_fault_param *pf_param; + const struct wmi_wow_ev_param *param; + struct wmi_wow_ev_arg *arg = data; + int pf_len; + + switch (tag) { + case WMI_TAG_WOW_EVENT_INFO: + param = ptr; + arg->wake_reason = le32_to_cpu(param->wake_reason); + ath12k_dbg(ab, ATH12K_DBG_WMI, "wow wakeup host reason %d %s\n", + arg->wake_reason, wow_reason(arg->wake_reason)); + break; + + case WMI_TAG_ARRAY_BYTE: + if (arg && arg->wake_reason == WOW_REASON_PAGE_FAULT) { + pf_param = ptr; + pf_len = le32_to_cpu(pf_param->len); + if (pf_len > len - sizeof(pf_len) || + pf_len < 0) { + ath12k_warn(ab, "invalid wo reason page fault buffer len %d\n", + pf_len); + return -EINVAL; + } + ath12k_dbg(ab, ATH12K_DBG_WMI, "wow_reason_page_fault len %d\n", + pf_len); + ath12k_dbg_dump(ab, ATH12K_DBG_WMI, + "wow_reason_page_fault packet present", + "wow_pg_fault ", + pf_param->data, + pf_len); + } + break; + default: + break; + } + + return 0; +} + +static void ath12k_wmi_event_wow_wakeup_host(struct ath12k_base *ab, struct sk_buff *skb) +{ + struct wmi_wow_ev_arg arg = { }; + int ret; + + ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, + ath12k_wmi_wow_wakeup_host_parse, + &arg); + if (ret) { + ath12k_warn(ab, "failed to parse wmi wow wakeup host event tlv: %d\n", + ret); + return; + } + + complete(&ab->wow.wakeup_completed); +} + +static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab, + struct sk_buff *skb) +{ + const struct wmi_gtk_offload_status_event *ev; + struct ath12k_link_vif *arvif; + __be64 replay_ctr_be; + u64 replay_ctr; + const void **tb; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return; + } + + ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT]; + if (!ev) { + ath12k_warn(ab, "failed to fetch gtk offload status ev"); + kfree(tb); + return; + } + + rcu_read_lock(); + arvif = 
ath12k_mac_get_arvif_by_vdev_id(ab, le32_to_cpu(ev->vdev_id));
+	if (!arvif) {
+		rcu_read_unlock();
+		ath12k_warn(ab, "failed to get arvif for vdev_id:%d\n",
+			    le32_to_cpu(ev->vdev_id));
+		kfree(tb);
+		return;
+	}
+
+	replay_ctr = le64_to_cpu(ev->replay_ctr);
+	arvif->rekey_data.replay_ctr = replay_ctr;
+	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi gtk offload event refresh_cnt %d replay_ctr %llu\n",
+		   le32_to_cpu(ev->refresh_cnt), replay_ctr);
+
+	/* supplicant expects big-endian replay counter */
+	replay_ctr_be = cpu_to_be64(replay_ctr);
+
+	ieee80211_gtk_rekey_notify(arvif->ahvif->vif, arvif->bssid,
+				   (void *)&replay_ctr_be, GFP_ATOMIC);
+
+	rcu_read_unlock();
+
+	kfree(tb);
+}
+
+static void ath12k_wmi_event_mlo_setup_complete(struct ath12k_base *ab,
+						struct sk_buff *skb)
+{
+	const struct wmi_mlo_setup_complete_event *ev;
+	struct ath12k *ar = NULL;
+	struct ath12k_pdev *pdev;
+	const void **tb;
+	int ret, i;
+
+	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath12k_warn(ab, "failed to parse mlo setup complete event tlv: %d\n",
+			    ret);
+		return;
+	}
+
+	ev = tb[WMI_TAG_MLO_SETUP_COMPLETE_EVENT];
+	if (!ev) {
+		ath12k_warn(ab, "failed to fetch mlo setup complete event\n");
+		kfree(tb);
+		return;
+	}
+
+	if (le32_to_cpu(ev->pdev_id) > ab->num_radios)
+		goto skip_lookup;
+
+	for (i = 0; i < ab->num_radios; i++) {
+		pdev = &ab->pdevs[i];
+		if (pdev && pdev->pdev_id == le32_to_cpu(ev->pdev_id)) {
+			ar = pdev->ar;
+			break;
+		}
+	}
+
+skip_lookup:
+	if (!ar) {
+		ath12k_warn(ab, "invalid pdev_id %d status %u in setup complete event\n",
+			    le32_to_cpu(ev->pdev_id), le32_to_cpu(ev->status));
+		goto out;
+	}
+
+	ar->mlo_setup_status = le32_to_cpu(ev->status);
+	complete(&ar->mlo_setup_done);
+
+out:
+	kfree(tb);
+}
+
+static void ath12k_wmi_event_teardown_complete(struct ath12k_base *ab,
+					       struct sk_buff *skb)
+{
+	const struct wmi_mlo_teardown_complete_event *ev;
+	const void **tb;
+	int ret;
+
+	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath12k_warn(ab, "failed to parse teardown complete event tlv: %d\n", ret);
+		return;
+	}
+
+	ev = tb[WMI_TAG_MLO_TEARDOWN_COMPLETE];
+	if (!ev) {
+		ath12k_warn(ab, "failed to fetch teardown complete event\n");
+		kfree(tb);
+		return;
+	}
+
+	kfree(tb);
+}
+
 static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
 {
 	struct wmi_cmd_hdr *cmd_hdr;
@@ -6666,14 +7538,17 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
 	case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
 		ath12k_probe_resp_tx_status_event(ab, skb);
 		break;
-	/* add Unsupported events here */
-	case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
-	case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
+	case WMI_RFKILL_STATE_CHANGE_EVENTID:
+		ath12k_rfkill_state_change_event(ab, skb);
+		break;
 	case WMI_TWT_ENABLE_EVENTID:
+		ath12k_wmi_twt_enable_event(ab, skb);
+		break;
 	case WMI_TWT_DISABLE_EVENTID:
-	case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
-		ath12k_dbg(ab, ATH12K_DBG_WMI,
-			   "ignoring unsupported event 0x%x\n", id);
+		ath12k_wmi_twt_disable_event(ab, skb);
+		break;
+	case WMI_P2P_NOA_EVENTID:
+		ath12k_wmi_p2p_noa_event(ab, skb);
 		break;
 	case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
 		ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
@@ -6681,6 +7556,34 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
 	case WMI_VDEV_DELETE_RESP_EVENTID:
 		ath12k_vdev_delete_resp_event(ab, skb);
 		break;
+	case WMI_DIAG_EVENTID:
+		ath12k_wmi_diag_event(ab, skb);
+		break;
+	case WMI_WOW_WAKEUP_HOST_EVENTID:
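+		/* Completes ab->wow.wakeup_completed; the WoW resume path
+		 * waits on this completion after asking firmware to wake
+		 * the host.
+		 */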
ath12k_wmi_event_wow_wakeup_host(ab, skb); + break; + case WMI_GTK_OFFLOAD_STATUS_EVENTID: + ath12k_wmi_gtk_offload_status_event(ab, skb); + break; + case WMI_MLO_SETUP_COMPLETE_EVENTID: + ath12k_wmi_event_mlo_setup_complete(ab, skb); + break; + case WMI_MLO_TEARDOWN_COMPLETE_EVENTID: + ath12k_wmi_event_teardown_complete(ab, skb); + break; + /* add Unsupported events (rare) here */ + case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID: + case WMI_PEER_OPER_MODE_CHANGE_EVENTID: + case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID: + ath12k_dbg(ab, ATH12K_DBG_WMI, + "ignoring unsupported event 0x%x\n", id); + break; + /* add Unsupported events (frequent) here */ + case WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID: + case WMI_MGMT_RX_FW_CONSUMED_EVENTID: + case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID: + /* debug might flood hence silently ignore (no-op) */ + break; /* TODO: Add remaining events */ default: ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id); @@ -6695,9 +7598,11 @@ static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab, u32 pdev_idx) { int status; - u32 svc_id[] = { ATH12K_HTC_SVC_ID_WMI_CONTROL, - ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1, - ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 }; + static const u32 svc_id[] = { + ATH12K_HTC_SVC_ID_WMI_CONTROL, + ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1, + ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 + }; struct ath12k_htc_svc_conn_req conn_req = {}; struct ath12k_htc_svc_conn_resp conn_resp = {}; @@ -6783,13 +7688,13 @@ ath12k_wmi_send_unit_test_cmd(struct ath12k *ar, int ath12k_wmi_simulate_radar(struct ath12k *ar) { - struct ath12k_vif *arvif; + struct ath12k_link_vif *arvif; u32 dfs_args[DFS_MAX_TEST_ARGS]; struct wmi_unit_test_cmd wmi_ut; bool arvif_found = false; list_for_each_entry(arvif, &ar->arvifs, list) { - if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) { + if (arvif->is_started && arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP) { arvif_found = true; break; } @@ -6890,3 +7795,709 @@ void ath12k_wmi_detach(struct ath12k_base *ab) ath12k_wmi_free_dbring_caps(ab); } + +int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_arg *arg) +{ + struct wmi_hw_data_filter_cmd *cmd; + struct sk_buff *skb; + int len; + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_hw_data_filter_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HW_DATA_FILTER_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(arg->vdev_id); + cmd->enable = cpu_to_le32(arg->enable ? 
1 : 0); + + /* Set all modes in case of disable */ + if (arg->enable) + cmd->hw_filter_bitmap = cpu_to_le32(arg->hw_filter_bitmap); + else + cmd->hw_filter_bitmap = cpu_to_le32((u32)~0U); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "wmi hw data filter enable %d filter_bitmap 0x%x\n", + arg->enable, arg->hw_filter_bitmap); + + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID); +} + +int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar) +{ + struct wmi_wow_host_wakeup_cmd *cmd; + struct sk_buff *skb; + size_t len; + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_wow_host_wakeup_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD, + sizeof(*cmd)); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow host wakeup ind\n"); + + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID); +} + +int ath12k_wmi_wow_enable(struct ath12k *ar) +{ + struct wmi_wow_enable_cmd *cmd; + struct sk_buff *skb; + int len; + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_wow_enable_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ENABLE_CMD, + sizeof(*cmd)); + + cmd->enable = cpu_to_le32(1); + cmd->pause_iface_config = cpu_to_le32(WOW_IFACE_PAUSE_ENABLED); + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow enable\n"); + + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID); +} + +int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id, + enum wmi_wow_wakeup_event event, + u32 enable) +{ + struct wmi_wow_add_del_event_cmd *cmd; + struct sk_buff *skb; + size_t len; + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_wow_add_del_event_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_DEL_EVT_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->is_add = cpu_to_le32(enable); + cmd->event_bitmap = cpu_to_le32((1 << event)); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n", + wow_wakeup_event(event), enable, vdev_id); + + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID); +} + +int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id, + const u8 *pattern, const u8 *mask, + int pattern_len, int pattern_offset) +{ + struct wmi_wow_add_pattern_cmd *cmd; + struct wmi_wow_bitmap_pattern_params *bitmap; + struct wmi_tlv *tlv; + struct sk_buff *skb; + void *ptr; + size_t len; + + len = sizeof(*cmd) + + sizeof(*tlv) + /* array struct */ + sizeof(*bitmap) + /* bitmap */ + sizeof(*tlv) + /* empty ipv4 sync */ + sizeof(*tlv) + /* empty ipv6 sync */ + sizeof(*tlv) + /* empty magic */ + sizeof(*tlv) + /* empty info timeout */ + sizeof(*tlv) + sizeof(u32); /* ratelimit interval */ + + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + /* cmd */ + ptr = skb->data; + cmd = ptr; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_PATTERN_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->pattern_id = cpu_to_le32(pattern_id); + cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN); + + ptr += sizeof(*cmd); + + /* bitmap */ + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*bitmap)); + + ptr += sizeof(*tlv); + + bitmap = ptr; + bitmap->tlv_header = 
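+			/* the pattern TLV carries fixed-size buffers
+			 * (WOW_DEFAULT_BITMAP_PATTERN_SIZE bytes each);
+			 * only the first pattern_len bytes are meaningful
+			 */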
ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_BITMAP_PATTERN_T, + sizeof(*bitmap)); + memcpy(bitmap->patternbuf, pattern, pattern_len); + memcpy(bitmap->bitmaskbuf, mask, pattern_len); + bitmap->pattern_offset = cpu_to_le32(pattern_offset); + bitmap->pattern_len = cpu_to_le32(pattern_len); + bitmap->bitmask_len = cpu_to_le32(pattern_len); + bitmap->pattern_id = cpu_to_le32(pattern_id); + + ptr += sizeof(*bitmap); + + /* ipv4 sync */ + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0); + + ptr += sizeof(*tlv); + + /* ipv6 sync */ + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0); + + ptr += sizeof(*tlv); + + /* magic */ + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0); + + ptr += sizeof(*tlv); + + /* pattern info timeout */ + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0); + + ptr += sizeof(*tlv); + + /* ratelimit interval */ + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32)); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d pattern_len %d\n", + vdev_id, pattern_id, pattern_offset, pattern_len); + + ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow pattern: ", + bitmap->patternbuf, pattern_len); + ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow bitmask: ", + bitmap->bitmaskbuf, pattern_len); + + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID); +} + +int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id) +{ + struct wmi_wow_del_pattern_cmd *cmd; + struct sk_buff *skb; + size_t len; + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_wow_del_pattern_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_DEL_PATTERN_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->pattern_id = cpu_to_le32(pattern_id); + cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n", + vdev_id, pattern_id); + + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID); +} + +static struct sk_buff * +ath12k_wmi_op_gen_config_pno_start(struct ath12k *ar, u32 vdev_id, + struct wmi_pno_scan_req_arg *pno) +{ + struct nlo_configured_params *nlo_list; + size_t len, nlo_list_len, channel_list_len; + struct wmi_wow_nlo_config_cmd *cmd; + __le32 *channel_list; + struct wmi_tlv *tlv; + struct sk_buff *skb; + void *ptr; + u32 i; + + len = sizeof(*cmd) + + sizeof(*tlv) + + /* TLV place holder for array of structures + * nlo_configured_params(nlo_list) + */ + sizeof(*tlv); + /* TLV place holder for array of uint32 channel_list */ + + channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count; + len += channel_list_len; + + nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count; + len += nlo_list_len; + + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = skb->data; + cmd = ptr; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, sizeof(*cmd)); + + cmd->vdev_id = cpu_to_le32(pno->vdev_id); + cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN); + + /* current FW does not support min-max range for dwell time */ + cmd->active_dwell_time = cpu_to_le32(pno->active_max_time); + cmd->passive_dwell_time = cpu_to_le32(pno->passive_max_time); + + if (pno->do_passive_scan) + cmd->flags |= 
cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE); + + cmd->fast_scan_period = cpu_to_le32(pno->fast_scan_period); + cmd->slow_scan_period = cpu_to_le32(pno->slow_scan_period); + cmd->fast_scan_max_cycles = cpu_to_le32(pno->fast_scan_max_cycles); + cmd->delay_start_time = cpu_to_le32(pno->delay_start_time); + + if (pno->enable_pno_scan_randomization) { + cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ | + WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ); + ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr); + ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask); + } + + ptr += sizeof(*cmd); + + /* nlo_configured_params(nlo_list) */ + cmd->no_of_ssids = cpu_to_le32(pno->uc_networks_count); + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, nlo_list_len); + + ptr += sizeof(*tlv); + nlo_list = ptr; + for (i = 0; i < pno->uc_networks_count; i++) { + tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header); + tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE, + sizeof(*nlo_list)); + + nlo_list[i].ssid.valid = cpu_to_le32(1); + nlo_list[i].ssid.ssid.ssid_len = + cpu_to_le32(pno->a_networks[i].ssid.ssid_len); + memcpy(nlo_list[i].ssid.ssid.ssid, + pno->a_networks[i].ssid.ssid, + le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len)); + + if (pno->a_networks[i].rssi_threshold && + pno->a_networks[i].rssi_threshold > -300) { + nlo_list[i].rssi_cond.valid = cpu_to_le32(1); + nlo_list[i].rssi_cond.rssi = + cpu_to_le32(pno->a_networks[i].rssi_threshold); + } + + nlo_list[i].bcast_nw_type.valid = cpu_to_le32(1); + nlo_list[i].bcast_nw_type.bcast_nw_type = + cpu_to_le32(pno->a_networks[i].bcast_nw_type); + } + + ptr += nlo_list_len; + cmd->num_of_channels = cpu_to_le32(pno->a_networks[0].channel_count); + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, channel_list_len); + ptr += sizeof(*tlv); + channel_list = ptr; + + for (i = 0; i < pno->a_networks[0].channel_count; i++) + channel_list[i] = cpu_to_le32(pno->a_networks[0].channels[i]); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n", + vdev_id); + + return skb; +} + +static struct sk_buff *ath12k_wmi_op_gen_config_pno_stop(struct ath12k *ar, + u32 vdev_id) +{ + struct wmi_wow_nlo_config_cmd *cmd; + struct sk_buff *skb; + size_t len; + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_wow_nlo_config_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, len); + + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_STOP); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "wmi tlv stop pno config vdev_id %d\n", vdev_id); + return skb; +} + +int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id, + struct wmi_pno_scan_req_arg *pno_scan) +{ + struct sk_buff *skb; + + if (pno_scan->enable) + skb = ath12k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan); + else + skb = ath12k_wmi_op_gen_config_pno_stop(ar, vdev_id); + + if (IS_ERR_OR_NULL(skb)) + return -ENOMEM; + + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID); +} + +static void ath12k_wmi_fill_ns_offload(struct ath12k *ar, + struct wmi_arp_ns_offload_arg *offload, + void **ptr, + bool enable, + bool ext) +{ + struct wmi_ns_offload_params *ns; + struct wmi_tlv *tlv; + void *buf_ptr = *ptr; + u32 ns_cnt, ns_ext_tuples; + int i, max_offloads; + + ns_cnt = offload->ipv6_count; + + tlv = buf_ptr; + + if (ext) { + ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS; + 
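+		/* second pass (ext == true): the first call already emitted
+		 * tuples 0..WMI_MAX_NS_OFFLOADS - 1, so continue from there
+		 * and append the remaining addresses as extended tuples
+		 */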
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, + ns_ext_tuples * sizeof(*ns)); + i = WMI_MAX_NS_OFFLOADS; + max_offloads = offload->ipv6_count; + } else { + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, + WMI_MAX_NS_OFFLOADS * sizeof(*ns)); + i = 0; + max_offloads = WMI_MAX_NS_OFFLOADS; + } + + buf_ptr += sizeof(*tlv); + + for (; i < max_offloads; i++) { + ns = buf_ptr; + ns->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NS_OFFLOAD_TUPLE, + sizeof(*ns)); + + if (enable) { + if (i < ns_cnt) + ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_VALID); + + memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16); + memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16); + + if (offload->ipv6_type[i]) + ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_IS_IPV6_ANYCAST); + + memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN); + + if (!is_zero_ether_addr(ns->target_mac.addr)) + ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_MAC_VALID); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "wmi index %d ns_solicited %pI6 target %pI6", + i, ns->solicitation_ipaddr, + ns->target_ipaddr[0]); + } + + buf_ptr += sizeof(*ns); + } + + *ptr = buf_ptr; +} + +static void ath12k_wmi_fill_arp_offload(struct ath12k *ar, + struct wmi_arp_ns_offload_arg *offload, + void **ptr, + bool enable) +{ + struct wmi_arp_offload_params *arp; + struct wmi_tlv *tlv; + void *buf_ptr = *ptr; + int i; + + /* fill arp tuple */ + tlv = buf_ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, + WMI_MAX_ARP_OFFLOADS * sizeof(*arp)); + buf_ptr += sizeof(*tlv); + + for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) { + arp = buf_ptr; + arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARP_OFFLOAD_TUPLE, + sizeof(*arp)); + + if (enable && i < offload->ipv4_count) { + /* Copy the target ip addr and flags */ + arp->flags = cpu_to_le32(WMI_ARPOL_FLAGS_VALID); + memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi arp offload address %pI4", + arp->target_ipaddr); + } + + buf_ptr += sizeof(*arp); + } + + *ptr = buf_ptr; +} + +int ath12k_wmi_arp_ns_offload(struct ath12k *ar, + struct ath12k_link_vif *arvif, + struct wmi_arp_ns_offload_arg *offload, + bool enable) +{ + struct wmi_set_arp_ns_offload_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + void *buf_ptr; + size_t len; + u8 ns_cnt, ns_ext_tuples = 0; + + ns_cnt = offload->ipv6_count; + + len = sizeof(*cmd) + + sizeof(*tlv) + + WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_params) + + sizeof(*tlv) + + WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_params); + + if (ns_cnt > WMI_MAX_NS_OFFLOADS) { + ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS; + len += sizeof(*tlv) + + ns_ext_tuples * sizeof(struct wmi_ns_offload_params); + } + + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + buf_ptr = skb->data; + cmd = buf_ptr; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_ARP_NS_OFFLOAD_CMD, + sizeof(*cmd)); + cmd->flags = cpu_to_le32(0); + cmd->vdev_id = cpu_to_le32(arvif->vdev_id); + cmd->num_ns_ext_tuples = cpu_to_le32(ns_ext_tuples); + + buf_ptr += sizeof(*cmd); + + ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 0); + ath12k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable); + + if (ns_ext_tuples) + ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 1); + + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID); +} + +int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar, + struct ath12k_link_vif *arvif, bool enable) +{ + struct ath12k_rekey_data *rekey_data = 
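+	/* populated earlier via the driver's GTK rekey-data callback */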
&arvif->rekey_data; + struct wmi_gtk_rekey_offload_cmd *cmd; + struct sk_buff *skb; + __le64 replay_ctr; + int len; + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(arvif->vdev_id); + + if (enable) { + cmd->flags = cpu_to_le32(GTK_OFFLOAD_ENABLE_OPCODE); + + /* the length in rekey_data and cmd is equal */ + memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck)); + memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek)); + + replay_ctr = cpu_to_le64(rekey_data->replay_ctr); + memcpy(cmd->replay_ctr, &replay_ctr, + sizeof(replay_ctr)); + } else { + cmd->flags = cpu_to_le32(GTK_OFFLOAD_DISABLE_OPCODE); + } + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "offload gtk rekey vdev: %d %d\n", + arvif->vdev_id, enable); + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID); +} + +int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar, + struct ath12k_link_vif *arvif) +{ + struct wmi_gtk_rekey_offload_cmd *cmd; + struct sk_buff *skb; + int len; + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(arvif->vdev_id); + cmd->flags = cpu_to_le32(GTK_OFFLOAD_REQUEST_STATUS_OPCODE); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "get gtk rekey vdev_id: %d\n", + arvif->vdev_id); + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID); +} + +int ath12k_wmi_sta_keepalive(struct ath12k *ar, + const struct wmi_sta_keepalive_arg *arg) +{ + struct wmi_sta_keepalive_arp_resp_params *arp; + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_sta_keepalive_cmd *cmd; + struct sk_buff *skb; + size_t len; + + len = sizeof(*cmd) + sizeof(*arp); + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_sta_keepalive_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALIVE_CMD, sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(arg->vdev_id); + cmd->enabled = cpu_to_le32(arg->enabled); + cmd->interval = cpu_to_le32(arg->interval); + cmd->method = cpu_to_le32(arg->method); + + arp = (struct wmi_sta_keepalive_arp_resp_params *)(cmd + 1); + arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALVE_ARP_RESPONSE, + sizeof(*arp)); + if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE || + arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) { + arp->src_ip4_addr = cpu_to_le32(arg->src_ip4_addr); + arp->dest_ip4_addr = cpu_to_le32(arg->dest_ip4_addr); + ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr); + } + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "wmi sta keepalive vdev %d enabled %d method %d interval %d\n", + arg->vdev_id, arg->enabled, arg->method, arg->interval); + + return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID); +} + +int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params) +{ + struct wmi_mlo_setup_cmd *cmd; + struct ath12k_wmi_pdev *wmi = ar->wmi; + u32 *partner_links, num_links; + int i, ret, buf_len, arg_len; + struct sk_buff *skb; + struct wmi_tlv *tlv; + void *ptr; + + num_links = mlo_params->num_partner_links; + arg_len = num_links * sizeof(u32); + buf_len = sizeof(*cmd) + TLV_HDR_SIZE + arg_len; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, 
buf_len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_mlo_setup_cmd *)skb->data;
+	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_SETUP_CMD,
+						 sizeof(*cmd));
+	cmd->mld_group_id = mlo_params->group_id;
+	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+	ptr = skb->data + sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
+	ptr += TLV_HDR_SIZE;
+
+	partner_links = ptr;
+	for (i = 0; i < num_links; i++)
+		partner_links[i] = mlo_params->partner_link_id[i];
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_SETUP_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to submit WMI_MLO_SETUP_CMDID command: %d\n",
+			    ret);
+		dev_kfree_skb(skb);
+		return ret;
+	}
+
+	return 0;
+}
+
+int ath12k_wmi_mlo_ready(struct ath12k *ar)
+{
+	struct wmi_mlo_ready_cmd *cmd;
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct sk_buff *skb;
+	int ret, len;
+
+	len = sizeof(*cmd);
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_mlo_ready_cmd *)skb->data;
+	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_READY_CMD,
+						 sizeof(*cmd));
+	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_READY_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to submit WMI_MLO_READY_CMDID command: %d\n",
+			    ret);
+		dev_kfree_skb(skb);
+		return ret;
+	}
+
+	return 0;
+}
+
+int ath12k_wmi_mlo_teardown(struct ath12k *ar)
+{
+	struct wmi_mlo_teardown_cmd *cmd;
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct sk_buff *skb;
+	int ret, len;
+
+	len = sizeof(*cmd);
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_mlo_teardown_cmd *)skb->data;
+	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TEARDOWN_CMD,
+						 sizeof(*cmd));
+	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+	cmd->reason_code = cpu_to_le32(WMI_MLO_TEARDOWN_SSR_REASON);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_TEARDOWN_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to submit WMI MLO teardown command: %d\n",
+			    ret);
+		dev_kfree_skb(skb);
+		return ret;
+	}
+
+	return 0;
+}
--- a/wmi.h
+++ b/wmi.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */
 #ifndef ATH12K_WMI_H
@@ -24,6 +24,7 @@
 struct ath12k_base;
 struct ath12k;
+struct ath12k_link_vif;
 /* There is no signed version of __le32, so for a temporary solution come
  * up with our own version. The idea is from fs/ntfs/endian.h.
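For reference, the signed little-endian helper pattern that this comment refers to can be sketched as below. This is an illustration of the approach only; the a_sle32 name and helper signatures are assumed, not quoted from the header:

	typedef __s32 __bitwise a_sle32;

	static inline a_sle32 a_cpu_to_sle32(s32 val)
	{
		/* reuse the unsigned helper, then force the sparse type */
		return (__force a_sle32)cpu_to_le32(val);
	}

	static inline s32 a_sle32_to_cpu(a_sle32 val)
	{
		return le32_to_cpu((__force __le32)val);
	}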
@@ -164,14 +165,6 @@ struct wmi_tlv { #define WLAN_SCAN_MAX_HINT_BSSID 10 #define MAX_RNR_BSS 5 -#define WLAN_SCAN_MAX_HINT_S_SSID 10 -#define WLAN_SCAN_MAX_HINT_BSSID 10 -#define MAX_RNR_BSS 5 - -#define WLAN_SCAN_PARAMS_MAX_SSID 16 -#define WLAN_SCAN_PARAMS_MAX_BSSID 4 -#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256 - #define WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG 1 #define WMI_BA_MODE_BUFFER_SIZE_256 3 @@ -292,6 +285,7 @@ enum wmi_cmd_group { WMI_GRP_TWT = 0x3e, WMI_GRP_MOTION_DET = 0x3f, WMI_GRP_SPATIAL_REUSE = 0x40, + WMI_GRP_MLO = 0x48, }; #define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1) @@ -361,6 +355,9 @@ enum wmi_tlv_cmd_id { WMI_PDEV_DMA_RING_CFG_REQ_CMDID, WMI_PDEV_HE_TB_ACTION_FRM_CMDID, WMI_PDEV_PKTLOG_FILTER_CMDID, + WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID = 0x4044, + WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID = 0x4045, + WMI_PDEV_SET_BIOS_INTERFACE_CMDID = 0x404A, WMI_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_VDEV), WMI_VDEV_DELETE_CMDID, WMI_VDEV_START_REQUEST_CMDID, @@ -669,6 +666,10 @@ enum wmi_tlv_cmd_id { WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID = WMI_TLV_CMD(WMI_GRP_SPATIAL_REUSE), WMI_PDEV_OBSS_PD_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMDID, + WMI_MLO_LINK_SET_ACTIVE_CMDID = WMI_TLV_CMD(WMI_GRP_MLO), + WMI_MLO_SETUP_CMDID, + WMI_MLO_READY_CMDID, + WMI_MLO_TEARDOWN_CMDID, }; enum wmi_tlv_event_id { @@ -710,6 +711,8 @@ enum wmi_tlv_event_id { WMI_PDEV_RAP_INFO_EVENTID, WMI_CHAN_RF_CHARACTERIZATION_INFO_EVENTID, WMI_SERVICE_READY_EXT2_EVENTID, + WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID = + WMI_SERVICE_READY_EXT2_EVENTID + 4, WMI_VDEV_START_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_VDEV), WMI_VDEV_STOPPED_EVENTID, WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID, @@ -751,6 +754,7 @@ enum wmi_tlv_event_id { WMI_TBTTOFFSET_EXT_UPDATE_EVENTID, WMI_OFFCHAN_DATA_TX_COMPLETION_EVENTID, WMI_HOST_FILS_DISCOVERY_EVENTID, + WMI_MGMT_RX_FW_CONSUMED_EVENTID = WMI_HOST_FILS_DISCOVERY_EVENTID + 3, WMI_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_CMD(WMI_GRP_BA_NEG), WMI_TX_ADDBA_COMPLETE_EVENTID, WMI_BA_RSP_SSN_EVENTID, @@ -849,6 +853,8 @@ enum wmi_tlv_event_id { WMI_MDNS_STATS_EVENTID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL), WMI_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_CMD(WMI_GRP_SAP_OFL), WMI_SAP_OFL_DEL_STA_EVENTID, + WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID = + WMI_EVT_GRP_START_ID(WMI_GRP_OBSS_OFL), WMI_OCB_SET_CONFIG_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_OCB), WMI_OCB_GET_TSF_TIMER_RESP_EVENTID, WMI_DCC_GET_STATS_RESP_EVENTID, @@ -878,6 +884,9 @@ enum wmi_tlv_event_id { WMI_TWT_DEL_DIALOG_EVENTID, WMI_TWT_PAUSE_DIALOG_EVENTID, WMI_TWT_RESUME_DIALOG_EVENTID, + WMI_MLO_LINK_SET_ACTIVE_RESP_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MLO), + WMI_MLO_SETUP_COMPLETE_EVENTID, + WMI_MLO_TEARDOWN_COMPLETE_EVENTID, }; enum wmi_tlv_pdev_param { @@ -1146,25 +1155,27 @@ enum wmi_tlv_vdev_param { }; enum wmi_tlv_peer_flags { - WMI_TLV_PEER_AUTH = 0x00000001, - WMI_TLV_PEER_QOS = 0x00000002, - WMI_TLV_PEER_NEED_PTK_4_WAY = 0x00000004, - WMI_TLV_PEER_NEED_GTK_2_WAY = 0x00000010, - WMI_TLV_PEER_APSD = 0x00000800, - WMI_TLV_PEER_HT = 0x00001000, - WMI_TLV_PEER_40MHZ = 0x00002000, - WMI_TLV_PEER_STBC = 0x00008000, - WMI_TLV_PEER_LDPC = 0x00010000, - WMI_TLV_PEER_DYN_MIMOPS = 0x00020000, - WMI_TLV_PEER_STATIC_MIMOPS = 0x00040000, - WMI_TLV_PEER_SPATIAL_MUX = 0x00200000, - WMI_TLV_PEER_VHT = 0x02000000, - WMI_TLV_PEER_80MHZ = 0x04000000, - WMI_TLV_PEER_PMF = 0x08000000, + WMI_PEER_AUTH = 0x00000001, + WMI_PEER_QOS = 0x00000002, + WMI_PEER_NEED_PTK_4_WAY = 0x00000004, + WMI_PEER_NEED_GTK_2_WAY = 0x00000010, + WMI_PEER_HE = 0x00000400, + WMI_PEER_APSD = 0x00000800, + 
WMI_PEER_HT = 0x00001000, + WMI_PEER_40MHZ = 0x00002000, + WMI_PEER_STBC = 0x00008000, + WMI_PEER_LDPC = 0x00010000, + WMI_PEER_DYN_MIMOPS = 0x00020000, + WMI_PEER_STATIC_MIMOPS = 0x00040000, + WMI_PEER_SPATIAL_MUX = 0x00200000, + WMI_PEER_TWT_REQ = 0x00400000, + WMI_PEER_TWT_RESP = 0x00800000, + WMI_PEER_VHT = 0x02000000, + WMI_PEER_80MHZ = 0x04000000, + WMI_PEER_PMF = 0x08000000, WMI_PEER_IS_P2P_CAPABLE = 0x20000000, WMI_PEER_160MHZ = 0x40000000, WMI_PEER_SAFEMODE_EN = 0x80000000, - }; enum wmi_tlv_peer_flags_ext { @@ -1931,6 +1942,22 @@ enum wmi_tlv_tag { WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9, WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT, WMI_TAG_EHT_RATE_SET = 0x3C4, + WMI_TAG_DCS_AWGN_INT_TYPE = 0x3C5, + WMI_TAG_MLO_TX_SEND_PARAMS, + WMI_TAG_MLO_PARTNER_LINK_PARAMS, + WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC, + WMI_TAG_MLO_SETUP_CMD = 0x3C9, + WMI_TAG_MLO_SETUP_COMPLETE_EVENT, + WMI_TAG_MLO_READY_CMD, + WMI_TAG_MLO_TEARDOWN_CMD, + WMI_TAG_MLO_TEARDOWN_COMPLETE, + WMI_TAG_MLO_PEER_ASSOC_PARAMS = 0x3D0, + WMI_TAG_MLO_PEER_CREATE_PARAMS = 0x3D5, + WMI_TAG_MLO_VDEV_START_PARAMS = 0x3D6, + WMI_TAG_MLO_VDEV_CREATE_PARAMS = 0x3D7, + WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8, + WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD = 0x3D9, + WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD = 0x3FB, WMI_TAG_MAX }; @@ -2154,10 +2181,18 @@ enum wmi_tlv_service { WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213, WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219, WMI_TLV_SERVICE_EXT2_MSG = 220, + WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT = 253, WMI_MAX_EXT_SERVICE = 256, WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT = 281, + + WMI_TLV_SERVICE_11BE = 289, + + WMI_TLV_SERVICE_WMSK_COMPACTION_RX_TLVS = 361, + + WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT = 365, + WMI_MAX_EXT2_SERVICE, }; @@ -2194,8 +2229,11 @@ enum wmi_peer_param { WMI_PEER_SET_MAX_TX_RATE = 17, WMI_PEER_SET_MIN_TX_RATE = 18, WMI_PEER_SET_DEFAULT_ROUTING = 19, + WMI_PEER_CHWIDTH_PUNCTURE_20MHZ_BITMAP = 39, }; +#define WMI_PEER_PUNCTURE_BITMAP GENMASK(23, 8) + enum wmi_slot_time { WMI_VDEV_SLOT_TIME_LONG = 1, WMI_VDEV_SLOT_TIME_SHORT = 2, @@ -2217,6 +2255,7 @@ enum wmi_peer_chwidth { WMI_PEER_CHWIDTH_40MHZ = 1, WMI_PEER_CHWIDTH_80MHZ = 2, WMI_PEER_CHWIDTH_160MHZ = 3, + WMI_PEER_CHWIDTH_320MHZ = 4, }; enum wmi_beacon_gen_mode { @@ -2281,6 +2320,13 @@ struct ath12k_wmi_host_mem_chunk_arg { u32 req_id; }; +enum ath12k_peer_metadata_version { + ATH12K_PEER_METADATA_V0, + ATH12K_PEER_METADATA_V1, + ATH12K_PEER_METADATA_V1A, + ATH12K_PEER_METADATA_V1B +}; + struct ath12k_wmi_resource_config_arg { u32 num_vdevs; u32 num_peers; @@ -2343,6 +2389,9 @@ struct ath12k_wmi_resource_config_arg { u32 sched_params; u32 twt_ap_pdev_count; u32 twt_ap_sta_count; + enum ath12k_peer_metadata_version peer_metadata_ver; + u32 ema_max_vap_cnt; + u32 ema_max_profile_period; bool is_reg_cc_ext_event_supported; }; @@ -2396,6 +2445,9 @@ struct wmi_init_cmd { } __packed; #define WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT 4 +#define WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION GENMASK(5, 4) +#define WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 BIT(5) +#define WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET BIT(9) struct ath12k_wmi_resource_config_params { __le32 tlv_header; @@ -2536,9 +2588,17 @@ struct ath12k_wmi_hw_mode_cap_params { #define WMI_MAX_HECAP_PHY_SIZE (3) +/* pdev_id is present in lower 16 bits of pdev_and_hw_link_ids in + * ath12k_wmi_mac_phy_caps_params & ath12k_wmi_caps_ext_params. + * + * hw_link_id is present in higher 16 bits of pdev_and_hw_link_ids. 
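+ *
+ * Either field can be unpacked with le32_get_bits(), e.g.
+ * le32_get_bits(pdev_and_hw_link_ids, WMI_CAPS_PARAMS_PDEV_ID).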
+ */ +#define WMI_CAPS_PARAMS_PDEV_ID GENMASK(15, 0) +#define WMI_CAPS_PARAMS_HW_LINK_ID GENMASK(31, 16) + struct ath12k_wmi_mac_phy_caps_params { __le32 hw_mode_id; - __le32 pdev_id; + __le32 pdev_and_hw_link_ids; __le32 phy_id; __le32 supported_flags; __le32 supported_bands; @@ -2588,6 +2648,19 @@ struct ath12k_wmi_soc_hal_reg_caps_params { __le32 num_phy; } __packed; +enum wmi_channel_width { + WMI_CHAN_WIDTH_20 = 0, + WMI_CHAN_WIDTH_40 = 1, + WMI_CHAN_WIDTH_80 = 2, + WMI_CHAN_WIDTH_160 = 3, + WMI_CHAN_WIDTH_80P80 = 4, + WMI_CHAN_WIDTH_5 = 5, + WMI_CHAN_WIDTH_10 = 6, + WMI_CHAN_WIDTH_165 = 7, + WMI_CHAN_WIDTH_160P160 = 8, + WMI_CHAN_WIDTH_320 = 9, +}; + #define WMI_MAX_EHTCAP_MAC_SIZE 2 #define WMI_MAX_EHTCAP_PHY_SIZE 3 #define WMI_MAX_EHTCAP_RATE_SET 3 @@ -2630,13 +2703,7 @@ struct wmi_service_ready_ext2_event { struct ath12k_wmi_caps_ext_params { __le32 hw_mode_id; - union { - struct { - __le16 pdev_id; - __le16 hw_link_id; - } __packed ath12k_wmi_pdev_to_link_map; - __le32 pdev_id; - }; + __le32 pdev_and_hw_link_ids; __le32 phy_id; __le32 wireless_modes_ext; __le32 eht_cap_mac_info_2ghz[WMI_MAX_EHTCAP_MAC_SIZE]; @@ -2649,6 +2716,8 @@ struct ath12k_wmi_caps_ext_params { __le32 eht_cap_info_internal; __le32 eht_supp_mcs_ext_2ghz[WMI_MAX_EHT_SUPP_MCS_2G_SIZE]; __le32 eht_supp_mcs_ext_5ghz[WMI_MAX_EHT_SUPP_MCS_5G_SIZE]; + __le32 eml_capability; + __le32 mld_capability; } __packed; /* 2 word representation of MAC addr */ @@ -2697,6 +2766,9 @@ struct ath12k_wmi_vdev_create_arg { } chains[NUM_NL80211_BANDS]; u32 pdev_id; u8 if_stats_id; + u32 mbssid_flags; + u32 mbssid_tx_vdev_id; + u8 mld_addr[ETH_ALEN]; }; #define ATH12K_MAX_VDEV_STATS_ID 0x30 @@ -2710,14 +2782,44 @@ struct wmi_vdev_create_cmd { struct ath12k_wmi_mac_addr_params vdev_macaddr; __le32 num_cfg_txrx_streams; __le32 pdev_id; + __le32 mbssid_flags; + __le32 mbssid_tx_vdev_id; + __le32 vdev_stats_id_valid; __le32 vdev_stats_id; } __packed; struct ath12k_wmi_vdev_txrx_streams_params { __le32 tlv_header; - u32 band; - u32 supported_tx_streams; - u32 supported_rx_streams; + __le32 band; + __le32 supported_tx_streams; + __le32 supported_rx_streams; +} __packed; + +struct wmi_vdev_create_mlo_params { + __le32 tlv_header; + struct ath12k_wmi_mac_addr_params mld_macaddr; +} __packed; + +#define ATH12K_WMI_FLAG_MLO_ENABLED BIT(0) +#define ATH12K_WMI_FLAG_MLO_ASSOC_LINK BIT(1) +#define ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC BIT(2) +#define ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID BIT(3) +#define ATH12K_WMI_FLAG_MLO_PEER_ID_VALID BIT(4) +#define ATH12K_WMI_FLAG_MLO_MCAST_VDEV BIT(5) +#define ATH12K_WMI_FLAG_MLO_EMLSR_SUPPORT BIT(6) +#define ATH12K_WMI_FLAG_MLO_FORCED_INACTIVE BIT(7) +#define ATH12K_WMI_FLAG_MLO_LINK_ADD BIT(8) + +struct wmi_vdev_start_mlo_params { + __le32 tlv_header; + __le32 flags; +} __packed; + +struct wmi_partner_link_info { + __le32 tlv_header; + __le32 vdev_id; + __le32 hw_link_id; + struct ath12k_wmi_mac_addr_params vdev_addr; } __packed; struct wmi_vdev_delete_cmd { @@ -2725,14 +2827,23 @@ struct wmi_vdev_delete_cmd { __le32 vdev_id; } __packed; +struct ath12k_wmi_vdev_up_params { + u32 vdev_id; + u32 aid; + const u8 *bssid; + const u8 *tx_bssid; + u32 nontx_profile_idx; + u32 nontx_profile_cnt; +}; + struct wmi_vdev_up_cmd { __le32 tlv_header; __le32 vdev_id; __le32 vdev_assoc_id; struct ath12k_wmi_mac_addr_params vdev_bssid; - struct ath12k_wmi_mac_addr_params trans_bssid; - __le32 profile_idx; - __le32 profile_num; + struct ath12k_wmi_mac_addr_params tx_vdev_bssid; + __le32 nontx_profile_idx; + __le32 
nontx_profile_cnt; } __packed; struct wmi_vdev_stop_cmd { @@ -2758,6 +2869,14 @@ struct ath12k_wmi_ssid_params { #define ATH12K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ) +enum wmi_vdev_mbssid_flags { + WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP = BIT(0), + WMI_VDEV_MBSSID_FLAGS_TRANSMIT_AP = BIT(1), + WMI_VDEV_MBSSID_FLAGS_NON_TRANSMIT_AP = BIT(2), + WMI_VDEV_MBSSID_FLAGS_EMA_MODE = BIT(3), + WMI_VDEV_MBSSID_FLAGS_SCAN_MODE_VAP = BIT(4), +}; + struct wmi_vdev_start_request_cmd { __le32 tlv_header; __le32 vdev_id; @@ -2776,7 +2895,7 @@ struct wmi_vdev_start_request_cmd { __le32 cac_duration_ms; __le32 regdomain; __le32 min_data_rate; - __le32 mbssid_flags; + __le32 mbssid_flags; /* uses enum wmi_vdev_mbssid_flags */ __le32 mbssid_tx_vdev_id; __le32 eht_ops; __le32 punct_bitmap; @@ -2846,6 +2965,27 @@ enum wmi_phy_mode { MODE_MAX = 33, }; +#define ATH12K_WMI_MLO_MAX_LINKS 4 + +struct wmi_ml_partner_info { + u32 vdev_id; + u32 hw_link_id; + u8 addr[ETH_ALEN]; + bool assoc_link; + bool primary_umac; + bool logical_link_idx_valid; + u32 logical_link_idx; +}; + +struct wmi_ml_arg { + bool enabled; + bool assoc_link; + bool mcast_link; + bool link_add; + u8 num_partner_links; + struct wmi_ml_partner_info partner_info[ATH12K_WMI_MLO_MAX_LINKS]; +}; + struct wmi_vdev_start_req_arg { u32 vdev_id; u32 freq; @@ -2883,12 +3023,19 @@ struct wmi_vdev_start_req_arg { u32 mbssid_flags; u32 mbssid_tx_vdev_id; u32 punct_bitmap; + struct wmi_ml_arg ml; }; struct ath12k_wmi_peer_create_arg { const u8 *peer_addr; u32 peer_type; u32 vdev_id; + bool ml_enabled; +}; + +struct wmi_peer_create_mlo_params { + __le32 tlv_header; + __le32 flags; }; struct ath12k_wmi_pdev_set_regdomain_arg { @@ -3058,6 +3205,7 @@ struct wmi_pdev_bss_chan_info_req_cmd { __le32 tlv_header; /* ref wmi_bss_chan_info_req_type */ __le32 req_type; + __le32 pdev_id; } __packed; struct wmi_ap_ps_peer_cmd { @@ -3140,7 +3288,7 @@ struct ath12k_wmi_element_info_arg { #define WLAN_SCAN_PARAMS_MAX_SSID 16 #define WLAN_SCAN_PARAMS_MAX_BSSID 4 -#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256 +#define WLAN_SCAN_PARAMS_MAX_IE_LEN 512 /* Values lower than this may be refused by some firmware revisions with a scan * completion with a timedout reason. 
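To illustrate how the new ath12k_wmi_vdev_up_params above is meant to be used: a caller on the mac side might fill it roughly as sketched below before handing it to ath12k_wmi_vdev_up(). The arvif, tx_arvif and info variables are assumed caller state for the sketch, not part of this patch:

	struct ath12k_wmi_vdev_up_params params = {
		.vdev_id = arvif->vdev_id,
		.aid = aid,
		.bssid = arvif->bssid,
	};

	/* non-transmitting profile of an MBSSID set */
	if (tx_arvif) {
		params.tx_bssid = tx_arvif->bssid;
		params.nontx_profile_idx = info->bssid_index;
		params.nontx_profile_cnt = 1 << info->bssid_indicator;
	}

	ret = ath12k_wmi_vdev_up(ar, &params);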
@@ -3264,24 +3412,19 @@ struct ath12k_wmi_scan_req_arg { u32 vdev_id; u32 pdev_id; enum wmi_scan_priority scan_priority; - union { - struct { - u32 scan_ev_started:1, - scan_ev_completed:1, - scan_ev_bss_chan:1, - scan_ev_foreign_chan:1, - scan_ev_dequeued:1, - scan_ev_preempted:1, - scan_ev_start_failed:1, - scan_ev_restarted:1, - scan_ev_foreign_chn_exit:1, - scan_ev_invalid:1, - scan_ev_gpio_timeout:1, - scan_ev_suspended:1, - scan_ev_resumed:1; - }; - u32 scan_events; - }; + u32 scan_ev_started:1, + scan_ev_completed:1, + scan_ev_bss_chan:1, + scan_ev_foreign_chan:1, + scan_ev_dequeued:1, + scan_ev_preempted:1, + scan_ev_start_failed:1, + scan_ev_restarted:1, + scan_ev_foreign_chn_exit:1, + scan_ev_invalid:1, + scan_ev_gpio_timeout:1, + scan_ev_suspended:1, + scan_ev_resumed:1; u32 dwell_time_active; u32 dwell_time_active_2g; u32 dwell_time_passive; @@ -3294,36 +3437,31 @@ struct ath12k_wmi_scan_req_arg { u32 idle_time; u32 max_scan_time; u32 probe_delay; - union { - struct { - u32 scan_f_passive:1, - scan_f_bcast_probe:1, - scan_f_cck_rates:1, - scan_f_ofdm_rates:1, - scan_f_chan_stat_evnt:1, - scan_f_filter_prb_req:1, - scan_f_bypass_dfs_chn:1, - scan_f_continue_on_err:1, - scan_f_offchan_mgmt_tx:1, - scan_f_offchan_data_tx:1, - scan_f_promisc_mode:1, - scan_f_capture_phy_err:1, - scan_f_strict_passive_pch:1, - scan_f_half_rate:1, - scan_f_quarter_rate:1, - scan_f_force_active_dfs_chn:1, - scan_f_add_tpc_ie_in_probe:1, - scan_f_add_ds_ie_in_probe:1, - scan_f_add_spoofed_mac_in_probe:1, - scan_f_add_rand_seq_in_probe:1, - scan_f_en_ie_whitelist_in_probe:1, - scan_f_forced:1, - scan_f_2ghz:1, - scan_f_5ghz:1, - scan_f_80mhz:1; - }; - u32 scan_flags; - }; + u32 scan_f_passive:1, + scan_f_bcast_probe:1, + scan_f_cck_rates:1, + scan_f_ofdm_rates:1, + scan_f_chan_stat_evnt:1, + scan_f_filter_prb_req:1, + scan_f_bypass_dfs_chn:1, + scan_f_continue_on_err:1, + scan_f_offchan_mgmt_tx:1, + scan_f_offchan_data_tx:1, + scan_f_promisc_mode:1, + scan_f_capture_phy_err:1, + scan_f_strict_passive_pch:1, + scan_f_half_rate:1, + scan_f_quarter_rate:1, + scan_f_force_active_dfs_chn:1, + scan_f_add_tpc_ie_in_probe:1, + scan_f_add_ds_ie_in_probe:1, + scan_f_add_spoofed_mac_in_probe:1, + scan_f_add_rand_seq_in_probe:1, + scan_f_en_ie_whitelist_in_probe:1, + scan_f_forced:1, + scan_f_2ghz:1, + scan_f_5ghz:1, + scan_f_80mhz:1; enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode; u32 burst_duration; u32 num_chan; @@ -3350,34 +3488,6 @@ struct wmi_bssid_arg { const u8 *bssid; }; -struct wmi_start_scan_arg { - u32 scan_id; - u32 scan_req_id; - u32 vdev_id; - u32 scan_priority; - u32 notify_scan_events; - u32 dwell_time_active; - u32 dwell_time_passive; - u32 min_rest_time; - u32 max_rest_time; - u32 repeat_probe_time; - u32 probe_spacing_time; - u32 idle_time; - u32 max_scan_time; - u32 probe_delay; - u32 scan_ctrl_flags; - - u32 ie_len; - u32 n_channels; - u32 n_ssids; - u32 n_bssids; - - u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN]; - u32 channels[64]; - struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID]; - struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID]; -}; - #define WMI_SCAN_STOP_ONE 0x00000000 #define WMI_SCAN_STOP_VAP_ALL 0x01000000 #define WMI_SCAN_STOP_ALL 0x04000000 @@ -3483,8 +3593,49 @@ struct wmi_get_pdev_temperature_cmd { __le32 pdev_id; } __packed; +#define WMI_P2P_MAX_NOA_DESCRIPTORS 4 + +struct wmi_p2p_noa_event { + __le32 vdev_id; +} __packed; + +struct ath12k_wmi_p2p_noa_descriptor { + __le32 type_count; /* 255: continuous schedule, 0: reserved */ + __le32 duration; /* Absent period 
duration in microseconds */
+	__le32 interval; /* Absent period interval in microseconds */
+	__le32 start_time; /* 32 bit tsf time when it starts */
+} __packed;
+
+#define WMI_P2P_NOA_INFO_CHANGED_FLAG BIT(0)
+#define WMI_P2P_NOA_INFO_INDEX GENMASK(15, 8)
+#define WMI_P2P_NOA_INFO_OPP_PS BIT(16)
+#define WMI_P2P_NOA_INFO_CTWIN_TU GENMASK(23, 17)
+#define WMI_P2P_NOA_INFO_DESC_NUM GENMASK(31, 24)
+
+struct ath12k_wmi_p2p_noa_info {
+	/* Bit 0 - Flag to indicate an update in NOA schedule
+	 * Bits 7-1 - Reserved
+	 * Bits 15-8 - Index (identifies the instance of NOA sub element)
+	 * Bit 16 - Opp PS state of the AP
+	 * Bits 23-17 - Ctwindow in TUs
+	 * Bits 31-24 - Number of NOA descriptors
+	 */
+	__le32 noa_attr;
+	struct ath12k_wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
+
 #define WMI_BEACON_TX_BUFFER_SIZE 512
+#define WMI_EMA_BEACON_CNT GENMASK(7, 0)
+#define WMI_EMA_BEACON_IDX GENMASK(15, 8)
+#define WMI_EMA_BEACON_FIRST GENMASK(23, 16)
+#define WMI_EMA_BEACON_LAST GENMASK(31, 24)
+
+struct ath12k_wmi_bcn_tmpl_ema_arg {
+	u8 bcn_cnt;
+	u8 bcn_index;
+};
+
 struct wmi_bcn_tmpl_cmd {
 	__le32 tlv_header;
 	__le32 vdev_id;
@@ -3495,6 +3646,17 @@ struct wmi_bcn_tmpl_cmd {
 	__le32 csa_event_bitmap;
 	__le32 mbssid_ie_offset;
 	__le32 esp_ie_offset;
+	__le32 csc_switch_count_offset;
+	__le32 csc_event_bitmap;
+	__le32 mu_edca_ie_offset;
+	__le32 feature_enable_bitmap;
+	__le32 ema_params;
+} __packed;
+
+struct wmi_p2p_go_set_beacon_ie_cmd {
+	__le32 tlv_header;
+	__le32 vdev_id;
+	__le32 ie_buf_len;
 } __packed;
 struct wmi_vdev_install_key_cmd {
@@ -3540,6 +3702,24 @@ struct wmi_vdev_install_key_arg {
 #define WMI_HECAP_TXRX_MCS_NSS_IDX_160 1
 #define WMI_HECAP_TXRX_MCS_NSS_IDX_80_80 2
+#define ATH12K_WMI_MLO_MAX_PARTNER_LINKS \
+	(ATH12K_WMI_MLO_MAX_LINKS + ATH12K_MAX_NUM_BRIDGE_LINKS - 1)
+
+struct peer_assoc_mlo_params {
+	bool enabled;
+	bool assoc_link;
+	bool primary_umac;
+	bool peer_id_valid;
+	bool logical_link_idx_valid;
+	bool bridge_peer;
+	u8 mld_addr[ETH_ALEN];
+	u32 logical_link_idx;
+	u32 ml_peer_id;
+	u32 ieee_link_id;
+	u8 num_partner_links;
+	struct wmi_ml_partner_info partner_info[ATH12K_WMI_MLO_MAX_LINKS];
+};
+
 struct wmi_rate_set_arg {
 	u32 num_rates;
 	u8 rates[WMI_MAX_SUPPORTED_RATES];
@@ -3614,8 +3794,36 @@ struct ath12k_wmi_peer_assoc_arg {
 	u32 peer_eht_tx_mcs_set[WMI_MAX_EHTCAP_RATE_SET];
 	struct ath12k_wmi_ppe_threshold_arg peer_eht_ppet;
 	u32 punct_bitmap;
+	bool is_assoc;
+	struct peer_assoc_mlo_params ml;
 };
+#define ATH12K_WMI_FLAG_MLO_ENABLED BIT(0)
+#define ATH12K_WMI_FLAG_MLO_ASSOC_LINK BIT(1)
+#define ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC BIT(2)
+#define ATH12K_WMI_FLAG_MLO_LINK_ID_VALID BIT(3)
+#define ATH12K_WMI_FLAG_MLO_PEER_ID_VALID BIT(4)
+
+struct wmi_peer_assoc_mlo_partner_info_params {
+	__le32 tlv_header;
+	__le32 vdev_id;
+	__le32 hw_link_id;
+	__le32 flags;
+	__le32 logical_link_idx;
+} __packed;
+
+struct wmi_peer_assoc_mlo_params {
+	__le32 tlv_header;
+	__le32 flags;
+	struct ath12k_wmi_mac_addr_params mld_addr;
+	__le32 logical_link_idx;
+	__le32 ml_peer_id;
+	__le32 ieee_link_id;
+	__le32 emlsr_trans_timeout_us;
+	__le32 emlsr_trans_delay_us;
+	__le32 emlsr_padding_delay_us;
+} __packed;
+
 struct wmi_peer_assoc_complete_cmd {
 	__le32 tlv_header;
 	struct ath12k_wmi_mac_addr_params peer_macaddr;
@@ -3841,31 +4049,6 @@ struct wmi_unit_test_cmd {
 #define MAX_SUPPORTED_RATES 128
-#define WMI_PEER_AUTH 0x00000001
-#define WMI_PEER_QOS 0x00000002
-#define WMI_PEER_NEED_PTK_4_WAY 0x00000004
-#define WMI_PEER_NEED_GTK_2_WAY 0x00000010
-#define WMI_PEER_HE 0x00000400 -#define WMI_PEER_APSD 0x00000800 -#define WMI_PEER_HT 0x00001000 -#define WMI_PEER_40MHZ 0x00002000 -#define WMI_PEER_STBC 0x00008000 -#define WMI_PEER_LDPC 0x00010000 -#define WMI_PEER_DYN_MIMOPS 0x00020000 -#define WMI_PEER_STATIC_MIMOPS 0x00040000 -#define WMI_PEER_SPATIAL_MUX 0x00200000 -#define WMI_PEER_TWT_REQ 0x00400000 -#define WMI_PEER_TWT_RESP 0x00800000 -#define WMI_PEER_VHT 0x02000000 -#define WMI_PEER_80MHZ 0x04000000 -#define WMI_PEER_PMF 0x08000000 -/* TODO: Place holder for WLAN_PEER_F_PS_PRESEND_REQUIRED = 0x10000000. - * Need to be cleaned up - */ -#define WMI_PEER_IS_P2P_CAPABLE 0x20000000 -#define WMI_PEER_160MHZ 0x40000000 -#define WMI_PEER_SAFEMODE_EN 0x80000000 - struct ath12k_wmi_vht_rate_set_params { __le32 tlv_header; __le32 rx_max_rate; @@ -3890,7 +4073,6 @@ struct ath12k_wmi_eht_rate_set_params { #define MAX_REG_RULES 10 #define REG_ALPHA2_LEN 2 #define MAX_6G_REG_RULES 5 -#define REG_US_5G_NUM_REG_RULES 4 enum wmi_start_event_param { WMI_VDEV_START_RESP_EVENT = 0, @@ -4033,7 +4215,6 @@ struct wmi_vdev_stopped_event { } __packed; struct wmi_pdev_bss_chan_info_event { - __le32 pdev_id; __le32 freq; /* Units in MHz */ __le32 noise_floor; /* units are dBm */ /* rx clear - how often the channel was unused */ @@ -4051,6 +4232,7 @@ struct wmi_pdev_bss_chan_info_event { /*rx_cycle cnt for my bss in 64bits format */ __le32 rx_bss_cycle_count_low; __le32 rx_bss_cycle_count_high; + __le32 pdev_id; } __packed; #define WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS 0 @@ -4195,6 +4377,9 @@ struct wmi_peer_sta_kickout_event { struct ath12k_wmi_mac_addr_params peer_macaddr; } __packed; +#define WMI_ROAM_REASON_MASK GENMASK(3, 0) +#define WMI_ROAM_SUBNET_STATUS_MASK GENMASK(5, 4) + enum wmi_roam_reason { WMI_ROAM_REASON_BETTER_AP = 1, WMI_ROAM_REASON_BEACON_MISS = 2, @@ -4757,17 +4942,15 @@ struct wmi_probe_tmpl_cmd { __le32 buf_len; } __packed; -#define WMI_MAX_MEM_REQS 32 - -#define MAX_RADIOS 3 +#define MAX_RADIOS 2 +#define WMI_MLO_CMD_TIMEOUT_HZ (5 * HZ) #define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ) #define WMI_SEND_TIMEOUT_HZ (3 * HZ) struct ath12k_wmi_pdev { struct ath12k_wmi_base *wmi_ab; enum ath12k_htc_ep_id eid; - const struct wmi_peer_flags_map *peer_flags; u32 rx_decap_mode; }; @@ -4781,7 +4964,6 @@ struct ath12k_wmi_base { struct completion unified_ready; DECLARE_BITMAP(svc_map, WMI_MAX_EXT2_SERVICE); wait_queue_head_t tx_credits_wq; - const struct wmi_peer_flags_map *peer_flags; u32 num_mem_chunks; u32 rx_decap_mode; struct ath12k_wmi_host_mem_chunk_arg mem_chunks[WMI_MAX_MEM_REQS]; @@ -4791,8 +4973,661 @@ struct ath12k_wmi_base { struct ath12k_wmi_target_cap_arg *targ_cap; }; +struct wmi_pdev_set_bios_interface_cmd { + __le32 tlv_header; + __le32 pdev_id; + __le32 param_type_id; + __le32 length; +} __packed; + +enum wmi_bios_param_type { + WMI_BIOS_PARAM_CCA_THRESHOLD_TYPE = 0, + WMI_BIOS_PARAM_TAS_CONFIG_TYPE = 1, + WMI_BIOS_PARAM_TAS_DATA_TYPE = 2, + + /* bandedge control power */ + WMI_BIOS_PARAM_TYPE_BANDEDGE = 3, + + WMI_BIOS_PARAM_TYPE_MAX, +}; + +struct wmi_pdev_set_bios_sar_table_cmd { + __le32 tlv_header; + __le32 pdev_id; + __le32 sar_len; + __le32 dbs_backoff_len; +} __packed; + +struct wmi_pdev_set_bios_geo_table_cmd { + __le32 tlv_header; + __le32 pdev_id; + __le32 geo_len; +} __packed; + #define ATH12K_FW_STATS_BUF_SIZE (1024 * 1024) +enum wmi_sys_cap_info_flags { + WMI_SYS_CAP_INFO_RXTX_LED = BIT(0), + WMI_SYS_CAP_INFO_RFKILL = BIT(1), +}; + +#define WMI_RFKILL_CFG_GPIO_PIN_NUM GENMASK(5, 0) +#define 
WMI_RFKILL_CFG_RADIO_LEVEL BIT(6) +#define WMI_RFKILL_CFG_PIN_AS_GPIO GENMASK(10, 7) + +enum wmi_rfkill_enable_radio { + WMI_RFKILL_ENABLE_RADIO_ON = 0, + WMI_RFKILL_ENABLE_RADIO_OFF = 1, +}; + +enum wmi_rfkill_radio_state { + WMI_RFKILL_RADIO_STATE_OFF = 1, + WMI_RFKILL_RADIO_STATE_ON = 2, +}; + +struct wmi_rfkill_state_change_event { + __le32 gpio_pin_num; + __le32 int_type; + __le32 radio_state; +} __packed; + +struct wmi_twt_enable_event { + __le32 pdev_id; + __le32 status; +} __packed; + +struct wmi_twt_disable_event { + __le32 pdev_id; + __le32 status; +} __packed; + +struct wmi_mlo_setup_cmd { + __le32 tlv_header; + __le32 mld_group_id; + __le32 pdev_id; +} __packed; + +struct wmi_mlo_setup_arg { + __le32 group_id; + u8 num_partner_links; + u8 *partner_link_id; +}; + +struct wmi_mlo_ready_cmd { + __le32 tlv_header; + __le32 pdev_id; +} __packed; + +enum wmi_mlo_tear_down_reason_code_type { + WMI_MLO_TEARDOWN_SSR_REASON, +}; + +struct wmi_mlo_teardown_cmd { + __le32 tlv_header; + __le32 pdev_id; + __le32 reason_code; +} __packed; + +struct wmi_mlo_setup_complete_event { + __le32 pdev_id; + __le32 status; +} __packed; + +struct wmi_mlo_teardown_complete_event { + __le32 pdev_id; + __le32 status; +} __packed; + +/* WOW structures */ +enum wmi_wow_wakeup_event { + WOW_BMISS_EVENT = 0, + WOW_BETTER_AP_EVENT, + WOW_DEAUTH_RECVD_EVENT, + WOW_MAGIC_PKT_RECVD_EVENT, + WOW_GTK_ERR_EVENT, + WOW_FOURWAY_HSHAKE_EVENT, + WOW_EAPOL_RECVD_EVENT, + WOW_NLO_DETECTED_EVENT, + WOW_DISASSOC_RECVD_EVENT, + WOW_PATTERN_MATCH_EVENT, + WOW_CSA_IE_EVENT, + WOW_PROBE_REQ_WPS_IE_EVENT, + WOW_AUTH_REQ_EVENT, + WOW_ASSOC_REQ_EVENT, + WOW_HTT_EVENT, + WOW_RA_MATCH_EVENT, + WOW_HOST_AUTO_SHUTDOWN_EVENT, + WOW_IOAC_MAGIC_EVENT, + WOW_IOAC_SHORT_EVENT, + WOW_IOAC_EXTEND_EVENT, + WOW_IOAC_TIMER_EVENT, + WOW_DFS_PHYERR_RADAR_EVENT, + WOW_BEACON_EVENT, + WOW_CLIENT_KICKOUT_EVENT, + WOW_EVENT_MAX, +}; + +enum wmi_wow_interface_cfg { + WOW_IFACE_PAUSE_ENABLED, + WOW_IFACE_PAUSE_DISABLED +}; + +#define C2S(x) case x: return #x + +static inline const char *wow_wakeup_event(enum wmi_wow_wakeup_event ev) +{ + switch (ev) { + C2S(WOW_BMISS_EVENT); + C2S(WOW_BETTER_AP_EVENT); + C2S(WOW_DEAUTH_RECVD_EVENT); + C2S(WOW_MAGIC_PKT_RECVD_EVENT); + C2S(WOW_GTK_ERR_EVENT); + C2S(WOW_FOURWAY_HSHAKE_EVENT); + C2S(WOW_EAPOL_RECVD_EVENT); + C2S(WOW_NLO_DETECTED_EVENT); + C2S(WOW_DISASSOC_RECVD_EVENT); + C2S(WOW_PATTERN_MATCH_EVENT); + C2S(WOW_CSA_IE_EVENT); + C2S(WOW_PROBE_REQ_WPS_IE_EVENT); + C2S(WOW_AUTH_REQ_EVENT); + C2S(WOW_ASSOC_REQ_EVENT); + C2S(WOW_HTT_EVENT); + C2S(WOW_RA_MATCH_EVENT); + C2S(WOW_HOST_AUTO_SHUTDOWN_EVENT); + C2S(WOW_IOAC_MAGIC_EVENT); + C2S(WOW_IOAC_SHORT_EVENT); + C2S(WOW_IOAC_EXTEND_EVENT); + C2S(WOW_IOAC_TIMER_EVENT); + C2S(WOW_DFS_PHYERR_RADAR_EVENT); + C2S(WOW_BEACON_EVENT); + C2S(WOW_CLIENT_KICKOUT_EVENT); + C2S(WOW_EVENT_MAX); + default: + return NULL; + } +} + +enum wmi_wow_wake_reason { + WOW_REASON_UNSPECIFIED = -1, + WOW_REASON_NLOD = 0, + WOW_REASON_AP_ASSOC_LOST, + WOW_REASON_LOW_RSSI, + WOW_REASON_DEAUTH_RECVD, + WOW_REASON_DISASSOC_RECVD, + WOW_REASON_GTK_HS_ERR, + WOW_REASON_EAP_REQ, + WOW_REASON_FOURWAY_HS_RECV, + WOW_REASON_TIMER_INTR_RECV, + WOW_REASON_PATTERN_MATCH_FOUND, + WOW_REASON_RECV_MAGIC_PATTERN, + WOW_REASON_P2P_DISC, + WOW_REASON_WLAN_HB, + WOW_REASON_CSA_EVENT, + WOW_REASON_PROBE_REQ_WPS_IE_RECV, + WOW_REASON_AUTH_REQ_RECV, + WOW_REASON_ASSOC_REQ_RECV, + WOW_REASON_HTT_EVENT, + WOW_REASON_RA_MATCH, + WOW_REASON_HOST_AUTO_SHUTDOWN, + WOW_REASON_IOAC_MAGIC_EVENT, + 
WOW_REASON_IOAC_SHORT_EVENT, + WOW_REASON_IOAC_EXTEND_EVENT, + WOW_REASON_IOAC_TIMER_EVENT, + WOW_REASON_ROAM_HO, + WOW_REASON_DFS_PHYERR_RADADR_EVENT, + WOW_REASON_BEACON_RECV, + WOW_REASON_CLIENT_KICKOUT_EVENT, + WOW_REASON_PAGE_FAULT = 0x3a, + WOW_REASON_DEBUG_TEST = 0xFF, +}; + +static inline const char *wow_reason(enum wmi_wow_wake_reason reason) +{ + switch (reason) { + C2S(WOW_REASON_UNSPECIFIED); + C2S(WOW_REASON_NLOD); + C2S(WOW_REASON_AP_ASSOC_LOST); + C2S(WOW_REASON_LOW_RSSI); + C2S(WOW_REASON_DEAUTH_RECVD); + C2S(WOW_REASON_DISASSOC_RECVD); + C2S(WOW_REASON_GTK_HS_ERR); + C2S(WOW_REASON_EAP_REQ); + C2S(WOW_REASON_FOURWAY_HS_RECV); + C2S(WOW_REASON_TIMER_INTR_RECV); + C2S(WOW_REASON_PATTERN_MATCH_FOUND); + C2S(WOW_REASON_RECV_MAGIC_PATTERN); + C2S(WOW_REASON_P2P_DISC); + C2S(WOW_REASON_WLAN_HB); + C2S(WOW_REASON_CSA_EVENT); + C2S(WOW_REASON_PROBE_REQ_WPS_IE_RECV); + C2S(WOW_REASON_AUTH_REQ_RECV); + C2S(WOW_REASON_ASSOC_REQ_RECV); + C2S(WOW_REASON_HTT_EVENT); + C2S(WOW_REASON_RA_MATCH); + C2S(WOW_REASON_HOST_AUTO_SHUTDOWN); + C2S(WOW_REASON_IOAC_MAGIC_EVENT); + C2S(WOW_REASON_IOAC_SHORT_EVENT); + C2S(WOW_REASON_IOAC_EXTEND_EVENT); + C2S(WOW_REASON_IOAC_TIMER_EVENT); + C2S(WOW_REASON_ROAM_HO); + C2S(WOW_REASON_DFS_PHYERR_RADADR_EVENT); + C2S(WOW_REASON_BEACON_RECV); + C2S(WOW_REASON_CLIENT_KICKOUT_EVENT); + C2S(WOW_REASON_PAGE_FAULT); + C2S(WOW_REASON_DEBUG_TEST); + default: + return NULL; + } +} + +#undef C2S + +#define WOW_DEFAULT_BITMAP_PATTERN_SIZE 148 +#define WOW_DEFAULT_BITMASK_SIZE 148 + +#define WOW_MIN_PATTERN_SIZE 1 +#define WOW_MAX_PATTERN_SIZE 148 +#define WOW_MAX_PKT_OFFSET 128 +#define WOW_HDR_LEN (sizeof(struct ieee80211_hdr_3addr) + \ + sizeof(struct rfc1042_hdr)) +#define WOW_MAX_REDUCE (WOW_HDR_LEN - sizeof(struct ethhdr) - \ + offsetof(struct ieee80211_hdr_3addr, addr1)) + +struct wmi_wow_bitmap_pattern_params { + __le32 tlv_header; + u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE]; + u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE]; + __le32 pattern_offset; + __le32 pattern_len; + __le32 bitmask_len; + __le32 pattern_id; +} __packed; + +struct wmi_wow_add_pattern_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 pattern_id; + __le32 pattern_type; +} __packed; + +struct wmi_wow_del_pattern_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 pattern_id; + __le32 pattern_type; +} __packed; + +enum wmi_tlv_pattern_type { + WOW_PATTERN_MIN = 0, + WOW_BITMAP_PATTERN = WOW_PATTERN_MIN, + WOW_IPV4_SYNC_PATTERN, + WOW_IPV6_SYNC_PATTERN, + WOW_WILD_CARD_PATTERN, + WOW_TIMER_PATTERN, + WOW_MAGIC_PATTERN, + WOW_IPV6_RA_PATTERN, + WOW_IOAC_PKT_PATTERN, + WOW_IOAC_TMR_PATTERN, + WOW_PATTERN_MAX +}; + +struct wmi_wow_add_del_event_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 is_add; + __le32 event_bitmap; +} __packed; + +struct wmi_wow_enable_cmd { + __le32 tlv_header; + __le32 enable; + __le32 pause_iface_config; + __le32 flags; +} __packed; + +struct wmi_wow_host_wakeup_cmd { + __le32 tlv_header; + __le32 reserved; +} __packed; + +struct wmi_wow_ev_param { + __le32 vdev_id; + __le32 flag; + __le32 wake_reason; + __le32 data_len; +} __packed; + +struct wmi_wow_ev_pg_fault_param { + __le32 len; + u8 data[]; +} __packed; + +struct wmi_wow_ev_arg { + enum wmi_wow_wake_reason wake_reason; +}; + +#define WMI_PNO_MAX_SCHED_SCAN_PLANS 2 +#define WMI_PNO_MAX_SCHED_SCAN_PLAN_INT 7200 +#define WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS 100 +#define WMI_PNO_MAX_NETW_CHANNELS 26 +#define WMI_PNO_MAX_NETW_CHANNELS_EX 60 +#define WMI_PNO_MAX_SUPP_NETWORKS WLAN_SCAN_PARAMS_MAX_SSID +#define 
WMI_PNO_MAX_IE_LENGTH WLAN_SCAN_PARAMS_MAX_IE_LEN + +/* size based on dot11 declaration without extra IEs as we will not carry those for PNO */ +#define WMI_PNO_MAX_PB_REQ_SIZE 450 + +#define WMI_PNO_24GHZ_DEFAULT_CH 1 +#define WMI_PNO_5GHZ_DEFAULT_CH 36 + +#define WMI_ACTIVE_MAX_CHANNEL_TIME 40 +#define WMI_PASSIVE_MAX_CHANNEL_TIME 110 + +/* SSID broadcast type */ +enum wmi_ssid_bcast_type { + BCAST_UNKNOWN = 0, + BCAST_NORMAL = 1, + BCAST_HIDDEN = 2, +}; + +#define WMI_NLO_MAX_SSIDS 16 +#define WMI_NLO_MAX_CHAN 48 + +#define WMI_NLO_CONFIG_STOP BIT(0) +#define WMI_NLO_CONFIG_START BIT(1) +#define WMI_NLO_CONFIG_RESET BIT(2) +#define WMI_NLO_CONFIG_SLOW_SCAN BIT(4) +#define WMI_NLO_CONFIG_FAST_SCAN BIT(5) +#define WMI_NLO_CONFIG_SSID_HIDE_EN BIT(6) + +/* This bit is used to indicate if EPNO or supplicant PNO is enabled. + * Only one of them can be enabled at a given time. + */ +#define WMI_NLO_CONFIG_ENLO BIT(7) +#define WMI_NLO_CONFIG_SCAN_PASSIVE BIT(8) +#define WMI_NLO_CONFIG_ENLO_RESET BIT(9) +#define WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ BIT(10) +#define WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ BIT(11) +#define WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ BIT(12) +#define WMI_NLO_CONFIG_ENABLE_CNLO_RSSI_CONFIG BIT(13) + +struct wmi_nlo_ssid_params { + __le32 valid; + struct ath12k_wmi_ssid_params ssid; +} __packed; + +struct wmi_nlo_enc_params { + __le32 valid; + __le32 enc_type; +} __packed; + +struct wmi_nlo_auth_params { + __le32 valid; + __le32 auth_type; +} __packed; + +struct wmi_nlo_bcast_nw_params { + __le32 valid; + __le32 bcast_nw_type; +} __packed; + +struct wmi_nlo_rssi_params { + __le32 valid; + __le32 rssi; +} __packed; + +struct nlo_configured_params { + /* TLV tag and len */ + __le32 tlv_header; + struct wmi_nlo_ssid_params ssid; + struct wmi_nlo_enc_params enc_type; + struct wmi_nlo_auth_params auth_type; + struct wmi_nlo_rssi_params rssi_cond; + + /* indicates if the SSID is hidden or not */ + struct wmi_nlo_bcast_nw_params bcast_nw_type; +} __packed; + +struct wmi_network_type_arg { + struct cfg80211_ssid ssid; + u32 authentication; + u32 encryption; + u32 bcast_nw_type; + u8 channel_count; + u16 channels[WMI_PNO_MAX_NETW_CHANNELS_EX]; + s32 rssi_threshold; +}; + +struct wmi_pno_scan_req_arg { + u8 enable; + u8 vdev_id; + u8 uc_networks_count; + struct wmi_network_type_arg a_networks[WMI_PNO_MAX_SUPP_NETWORKS]; + u32 fast_scan_period; + u32 slow_scan_period; + u8 fast_scan_max_cycles; + + bool do_passive_scan; + + u32 delay_start_time; + u32 active_min_time; + u32 active_max_time; + u32 passive_min_time; + u32 passive_max_time; + + /* mac address randomization attributes */ + u32 enable_pno_scan_randomization; + u8 mac_addr[ETH_ALEN]; + u8 mac_addr_mask[ETH_ALEN]; +}; + +struct wmi_wow_nlo_config_cmd { + __le32 tlv_header; + __le32 flags; + __le32 vdev_id; + __le32 fast_scan_max_cycles; + __le32 active_dwell_time; + __le32 passive_dwell_time; + __le32 probe_bundle_size; + + /* ART = IRT */ + __le32 rest_time; + + /* max value that can be reached after scan_backoff_multiplier */ + __le32 max_rest_time; + + __le32 scan_backoff_multiplier; + __le32 fast_scan_period; + + /* specific to windows */ + __le32 slow_scan_period; + + __le32 no_of_ssids; + + __le32 num_of_channels; + + /* NLO scan start delay time in milliseconds */ + __le32 delay_start_time; + + /* MAC Address to use in Probe Req as SA */ + struct ath12k_wmi_mac_addr_params mac_addr; + + /* Mask on which MAC has to be randomized */ + struct ath12k_wmi_mac_addr_params mac_mask; + + /* IE bitmap to use in 
Probe Req */ + __le32 ie_bitmap[8]; + + /* Number of vendor OUIs. In the TLV vendor_oui[] */ + __le32 num_vendor_oui; + + /* Number of connected NLO band preferences */ + __le32 num_cnlo_band_pref; + + /* The TLVs will follow. + * nlo_configured_params nlo_list[]; + * u32 channel_list[num_of_channels]; + */ +} __packed; + +/* Definition of HW data filtering */ +enum hw_data_filter_type { + WMI_HW_DATA_FILTER_DROP_NON_ARP_BC = BIT(0), + WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC = BIT(1), +}; + +struct wmi_hw_data_filter_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 enable; + __le32 hw_filter_bitmap; +} __packed; + +struct wmi_hw_data_filter_arg { + u32 vdev_id; + bool enable; + u32 hw_filter_bitmap; +}; + +#define WMI_IPV6_UC_TYPE 0 +#define WMI_IPV6_AC_TYPE 1 + +#define WMI_IPV6_MAX_COUNT 16 +#define WMI_IPV4_MAX_COUNT 2 + +struct wmi_arp_ns_offload_arg { + u8 ipv4_addr[WMI_IPV4_MAX_COUNT][4]; + u32 ipv4_count; + u32 ipv6_count; + u8 ipv6_addr[WMI_IPV6_MAX_COUNT][16]; + u8 self_ipv6_addr[WMI_IPV6_MAX_COUNT][16]; + u8 ipv6_type[WMI_IPV6_MAX_COUNT]; + bool ipv6_valid[WMI_IPV6_MAX_COUNT]; + u8 mac_addr[ETH_ALEN]; +}; + +#define WMI_MAX_NS_OFFLOADS 2 +#define WMI_MAX_ARP_OFFLOADS 2 + +#define WMI_ARPOL_FLAGS_VALID BIT(0) +#define WMI_ARPOL_FLAGS_MAC_VALID BIT(1) +#define WMI_ARPOL_FLAGS_REMOTE_IP_VALID BIT(2) + +struct wmi_arp_offload_params { + __le32 tlv_header; + __le32 flags; + u8 target_ipaddr[4]; + u8 remote_ipaddr[4]; + struct ath12k_wmi_mac_addr_params target_mac; +} __packed; + +#define WMI_NSOL_FLAGS_VALID BIT(0) +#define WMI_NSOL_FLAGS_MAC_VALID BIT(1) +#define WMI_NSOL_FLAGS_REMOTE_IP_VALID BIT(2) +#define WMI_NSOL_FLAGS_IS_IPV6_ANYCAST BIT(3) + +#define WMI_NSOL_MAX_TARGET_IPS 2 + +struct wmi_ns_offload_params { + __le32 tlv_header; + __le32 flags; + u8 target_ipaddr[WMI_NSOL_MAX_TARGET_IPS][16]; + u8 solicitation_ipaddr[16]; + u8 remote_ipaddr[16]; + struct ath12k_wmi_mac_addr_params target_mac; +} __packed; + +struct wmi_set_arp_ns_offload_cmd { + __le32 tlv_header; + __le32 flags; + __le32 vdev_id; + __le32 num_ns_ext_tuples; + /* The TLVs follow: + * wmi_ns_offload_params ns[WMI_MAX_NS_OFFLOADS]; + * wmi_arp_offload_params arp[WMI_MAX_ARP_OFFLOADS]; + * wmi_ns_offload_params ns_ext[num_ns_ext_tuples]; + */ +} __packed; + +#define GTK_OFFLOAD_OPCODE_MASK 0xFF000000 +#define GTK_OFFLOAD_ENABLE_OPCODE 0x01000000 +#define GTK_OFFLOAD_DISABLE_OPCODE 0x02000000 +#define GTK_OFFLOAD_REQUEST_STATUS_OPCODE 0x04000000 + +#define GTK_OFFLOAD_KEK_BYTES 16 +#define GTK_OFFLOAD_KCK_BYTES 16 +#define GTK_REPLAY_COUNTER_BYTES 8 +#define WMI_MAX_KEY_LEN 32 +#define IGTK_PN_SIZE 6 + +struct wmi_gtk_offload_status_event { + __le32 vdev_id; + __le32 flags; + __le32 refresh_cnt; + __le64 replay_ctr; + u8 igtk_key_index; + u8 igtk_key_length; + u8 igtk_key_rsc[IGTK_PN_SIZE]; + u8 igtk_key[WMI_MAX_KEY_LEN]; + u8 gtk_key_index; + u8 gtk_key_length; + u8 gtk_key_rsc[GTK_REPLAY_COUNTER_BYTES]; + u8 gtk_key[WMI_MAX_KEY_LEN]; +} __packed; + +struct wmi_gtk_rekey_offload_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 flags; + u8 kek[GTK_OFFLOAD_KEK_BYTES]; + u8 kck[GTK_OFFLOAD_KCK_BYTES]; + u8 replay_ctr[GTK_REPLAY_COUNTER_BYTES]; +} __packed; + +struct wmi_sta_keepalive_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 enabled; + + /* WMI_STA_KEEPALIVE_METHOD_ */ + __le32 method; + + /* in seconds */ + __le32 interval; + + /* following this structure is the TLV for struct + * wmi_sta_keepalive_arp_resp_params + */ +} __packed; + +struct wmi_sta_keepalive_arp_resp_params { + __le32 
tlv_header; + __le32 src_ip4_addr; + __le32 dest_ip4_addr; + struct ath12k_wmi_mac_addr_params dest_mac_addr; +} __packed; + +struct wmi_sta_keepalive_arg { + u32 vdev_id; + u32 enabled; + u32 method; + u32 interval; + u32 src_ip4_addr; + u32 dest_ip4_addr; + const u8 dest_mac_addr[ETH_ALEN]; +}; + +enum wmi_sta_keepalive_method { + WMI_STA_KEEPALIVE_METHOD_NULL_FRAME = 1, + WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE = 2, + WMI_STA_KEEPALIVE_METHOD_ETHERNET_LOOPBACK = 3, + WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST = 4, + WMI_STA_KEEPALIVE_METHOD_MGMT_VENDOR_ACTION = 5, +}; + +#define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30 +#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0 + void ath12k_wmi_init_qcn9274(struct ath12k_base *ab, struct ath12k_wmi_resource_config_arg *config); void ath12k_wmi_init_wcn7850(struct ath12k_base *ab, @@ -4802,12 +5637,14 @@ int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb, struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_sc, u32 len); int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id, struct sk_buff *frame); +int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id, + const u8 *p2p_ie); int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id, struct ieee80211_mutable_offsets *offs, - struct sk_buff *bcn); + struct sk_buff *bcn, + struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args); int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id); -int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, - const u8 *bssid); +int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params); int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id); int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg, bool restart); @@ -4855,8 +5692,6 @@ int ath12k_wmi_vdev_install_key(struct ath12k *ar, struct wmi_vdev_install_key_arg *arg); int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar, enum wmi_bss_chan_info_req_type type); -int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id, - u32 vdev_id, u32 pdev_id); int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar); int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar, u8 peer_addr[ETH_ALEN], @@ -4914,5 +5749,62 @@ int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id, struct sk_buff *tmpl); int ath12k_wmi_set_hw_mode(struct ath12k_base *ab, enum wmi_host_hw_mode_config_type mode); +int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id, + const u8 *buf, size_t buf_len); +int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table); +int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table); + +static inline u32 +ath12k_wmi_caps_ext_get_pdev_id(const struct ath12k_wmi_caps_ext_params *param) +{ + return le32_get_bits(param->pdev_and_hw_link_ids, WMI_CAPS_PARAMS_PDEV_ID); +} + +static inline u32 +ath12k_wmi_caps_ext_get_hw_link_id(const struct ath12k_wmi_caps_ext_params *param) +{ + return le32_get_bits(param->pdev_and_hw_link_ids, WMI_CAPS_PARAMS_HW_LINK_ID); +} + +static inline u32 +ath12k_wmi_mac_phy_get_pdev_id(const struct ath12k_wmi_mac_phy_caps_params *param) +{ + return le32_get_bits(param->pdev_and_hw_link_ids, + WMI_CAPS_PARAMS_PDEV_ID); +} + +static inline u32 +ath12k_wmi_mac_phy_get_hw_link_id(const struct ath12k_wmi_mac_phy_caps_params *param) +{ + return le32_get_bits(param->pdev_and_hw_link_ids, + WMI_CAPS_PARAMS_HW_LINK_ID); +} + +int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar); +int ath12k_wmi_wow_enable(struct ath12k 
*ar); +int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id); +int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id, + const u8 *pattern, const u8 *mask, + int pattern_len, int pattern_offset); +int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id, + enum wmi_wow_wakeup_event event, + u32 enable); +int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id, + struct wmi_pno_scan_req_arg *pno_scan); +int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, + struct wmi_hw_data_filter_arg *arg); +int ath12k_wmi_arp_ns_offload(struct ath12k *ar, + struct ath12k_link_vif *arvif, + struct wmi_arp_ns_offload_arg *offload, + bool enable); +int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar, + struct ath12k_link_vif *arvif, bool enable); +int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar, + struct ath12k_link_vif *arvif); +int ath12k_wmi_sta_keepalive(struct ath12k *ar, + const struct wmi_sta_keepalive_arg *arg); +int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params); +int ath12k_wmi_mlo_ready(struct ath12k *ar); +int ath12k_wmi_mlo_teardown(struct ath12k *ar); #endif diff --git a/wow.c b/wow.c new file mode 100644 index 000000000000..9e1c0bfd212f --- /dev/null +++ b/wow.c @@ -0,0 +1,1027 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include <linux/delay.h> +#include <linux/inetdevice.h> +#include <net/addrconf.h> +#include <net/if_inet6.h> +#include <net/ipv6.h> + +#include "mac.h" + +#include <net/mac80211.h> +#include "core.h" +#include "hif.h" +#include "debug.h" +#include "wmi.h" +#include "wow.h" + +static const struct wiphy_wowlan_support ath12k_wowlan_support = { + .flags = WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_MAGIC_PKT | + WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | + WIPHY_WOWLAN_GTK_REKEY_FAILURE, + .pattern_min_len = WOW_MIN_PATTERN_SIZE, + .pattern_max_len = WOW_MAX_PATTERN_SIZE, + .max_pkt_offset = WOW_MAX_PKT_OFFSET, +}; + +static inline bool ath12k_wow_is_p2p_vdev(struct ath12k_vif *ahvif) +{ + return (ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_DEVICE || + ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_CLIENT || + ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_GO); +} + +int ath12k_wow_enable(struct ath12k *ar) +{ + struct ath12k_base *ab = ar->ab; + int i, ret; + + clear_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags); + + /* The firmware might be busy and it can not enter WoW immediately. + * In that case firmware notifies host with + * ATH12K_HTC_MSG_NACK_SUSPEND message, asking host to try again + * later. Per the firmware team there could be up to 10 loops. 
+ */ + for (i = 0; i < ATH12K_WOW_RETRY_NUM; i++) { + reinit_completion(&ab->htc_suspend); + + ret = ath12k_wmi_wow_enable(ar); + if (ret) { + ath12k_warn(ab, "failed to issue wow enable: %d\n", ret); + return ret; + } + + ret = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ); + if (ret == 0) { + ath12k_warn(ab, + "timed out while waiting for htc suspend completion\n"); + return -ETIMEDOUT; + } + + if (test_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags)) + /* success, suspend complete received */ + return 0; + + ath12k_warn(ab, "htc suspend not complete, retrying (try %d)\n", + i); + msleep(ATH12K_WOW_RETRY_WAIT_MS); + } + + ath12k_warn(ab, "htc suspend not complete, failing after %d tries\n", i); + + return -ETIMEDOUT; +} + +int ath12k_wow_wakeup(struct ath12k *ar) +{ + struct ath12k_base *ab = ar->ab; + int ret; + + reinit_completion(&ab->wow.wakeup_completed); + + ret = ath12k_wmi_wow_host_wakeup_ind(ar); + if (ret) { + ath12k_warn(ab, "failed to send wow wakeup indication: %d\n", + ret); + return ret; + } + + ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ); + if (ret == 0) { + ath12k_warn(ab, "timed out while waiting for wow wakeup completion\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int ath12k_wow_vif_cleanup(struct ath12k_link_vif *arvif) +{ + struct ath12k *ar = arvif->ar; + int i, ret; + + for (i = 0; i < WOW_EVENT_MAX; i++) { + ret = ath12k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0); + if (ret) { + ath12k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n", + wow_wakeup_event(i), arvif->vdev_id, ret); + return ret; + } + } + + for (i = 0; i < ar->wow.max_num_patterns; i++) { + ret = ath12k_wmi_wow_del_pattern(ar, arvif->vdev_id, i); + if (ret) { + ath12k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n", + i, arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +static int ath12k_wow_cleanup(struct ath12k *ar) +{ + struct ath12k_link_vif *arvif; + int ret; + + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + list_for_each_entry(arvif, &ar->arvifs, list) { + ret = ath12k_wow_vif_cleanup(arvif); + if (ret) { + ath12k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +/* Convert a 802.3 format to a 802.11 format. + * +------------+-----------+--------+----------------+ + * 802.3: |dest mac(6B)|src mac(6B)|type(2B)| body... | + * +------------+-----------+--------+----------------+ + * |__ |_______ |____________ |________ + * | | | | + * +--+------------+----+-----------+---------------+-----------+ + * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... 
| + * +--+------------+----+-----------+---------------+-----------+ + */ +static void +ath12k_wow_convert_8023_to_80211(struct ath12k *ar, + const struct cfg80211_pkt_pattern *eth_pattern, + struct ath12k_pkt_pattern *i80211_pattern) +{ + size_t r1042_eth_ofs = offsetof(struct rfc1042_hdr, eth_type); + size_t a1_ofs = offsetof(struct ieee80211_hdr_3addr, addr1); + size_t a3_ofs = offsetof(struct ieee80211_hdr_3addr, addr3); + size_t i80211_hdr_len = sizeof(struct ieee80211_hdr_3addr); + size_t prot_ofs = offsetof(struct ethhdr, h_proto); + size_t src_ofs = offsetof(struct ethhdr, h_source); + u8 eth_bytemask[WOW_MAX_PATTERN_SIZE] = {}; + const u8 *eth_pat = eth_pattern->pattern; + size_t eth_pat_len = eth_pattern->pattern_len; + size_t eth_pkt_ofs = eth_pattern->pkt_offset; + u8 *bytemask = i80211_pattern->bytemask; + u8 *pat = i80211_pattern->pattern; + size_t pat_len = 0; + size_t pkt_ofs = 0; + size_t delta; + int i; + + /* convert bitmask to bytemask */ + for (i = 0; i < eth_pat_len; i++) + if (eth_pattern->mask[i / 8] & BIT(i % 8)) + eth_bytemask[i] = 0xff; + + if (eth_pkt_ofs < ETH_ALEN) { + pkt_ofs = eth_pkt_ofs + a1_ofs; + + if (size_add(eth_pkt_ofs, eth_pat_len) < ETH_ALEN) { + memcpy(pat, eth_pat, eth_pat_len); + memcpy(bytemask, eth_bytemask, eth_pat_len); + + pat_len = eth_pat_len; + } else if (size_add(eth_pkt_ofs, eth_pat_len) < prot_ofs) { + memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs); + memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs); + + delta = eth_pkt_ofs + eth_pat_len - src_ofs; + memcpy(pat + a3_ofs - pkt_ofs, + eth_pat + ETH_ALEN - eth_pkt_ofs, + delta); + memcpy(bytemask + a3_ofs - pkt_ofs, + eth_bytemask + ETH_ALEN - eth_pkt_ofs, + delta); + + pat_len = a3_ofs - pkt_ofs + delta; + } else { + memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs); + memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs); + + memcpy(pat + a3_ofs - pkt_ofs, + eth_pat + ETH_ALEN - eth_pkt_ofs, + ETH_ALEN); + memcpy(bytemask + a3_ofs - pkt_ofs, + eth_bytemask + ETH_ALEN - eth_pkt_ofs, + ETH_ALEN); + + delta = eth_pkt_ofs + eth_pat_len - prot_ofs; + memcpy(pat + i80211_hdr_len + r1042_eth_ofs - pkt_ofs, + eth_pat + prot_ofs - eth_pkt_ofs, + delta); + memcpy(bytemask + i80211_hdr_len + r1042_eth_ofs - pkt_ofs, + eth_bytemask + prot_ofs - eth_pkt_ofs, + delta); + + pat_len = i80211_hdr_len + r1042_eth_ofs - pkt_ofs + delta; + } + } else if (eth_pkt_ofs < prot_ofs) { + pkt_ofs = eth_pkt_ofs - ETH_ALEN + a3_ofs; + + if (size_add(eth_pkt_ofs, eth_pat_len) < prot_ofs) { + memcpy(pat, eth_pat, eth_pat_len); + memcpy(bytemask, eth_bytemask, eth_pat_len); + + pat_len = eth_pat_len; + } else { + memcpy(pat, eth_pat, prot_ofs - eth_pkt_ofs); + memcpy(bytemask, eth_bytemask, prot_ofs - eth_pkt_ofs); + + delta = eth_pkt_ofs + eth_pat_len - prot_ofs; + memcpy(pat + i80211_hdr_len + r1042_eth_ofs - pkt_ofs, + eth_pat + prot_ofs - eth_pkt_ofs, + delta); + memcpy(bytemask + i80211_hdr_len + r1042_eth_ofs - pkt_ofs, + eth_bytemask + prot_ofs - eth_pkt_ofs, + delta); + + pat_len = i80211_hdr_len + r1042_eth_ofs - pkt_ofs + delta; + } + } else { + pkt_ofs = eth_pkt_ofs - prot_ofs + i80211_hdr_len + r1042_eth_ofs; + + memcpy(pat, eth_pat, eth_pat_len); + memcpy(bytemask, eth_bytemask, eth_pat_len); + + pat_len = eth_pat_len; + } + + i80211_pattern->pattern_len = pat_len; + i80211_pattern->pkt_offset = pkt_ofs; +} + +static int +ath12k_wow_pno_check_and_convert(struct ath12k *ar, u32 vdev_id, + const struct cfg80211_sched_scan_request *nd_config, + struct wmi_pno_scan_req_arg *pno) +{ + int i, j; + u8 ssid_len; 
+ + pno->enable = 1; + pno->vdev_id = vdev_id; + pno->uc_networks_count = nd_config->n_match_sets; + + if (!pno->uc_networks_count || + pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS) + return -EINVAL; + + if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX) + return -EINVAL; + + /* Filling per profile params */ + for (i = 0; i < pno->uc_networks_count; i++) { + ssid_len = nd_config->match_sets[i].ssid.ssid_len; + + if (ssid_len == 0 || ssid_len > 32) + return -EINVAL; + + pno->a_networks[i].ssid.ssid_len = ssid_len; + + memcpy(pno->a_networks[i].ssid.ssid, + nd_config->match_sets[i].ssid.ssid, + ssid_len); + pno->a_networks[i].authentication = 0; + pno->a_networks[i].encryption = 0; + pno->a_networks[i].bcast_nw_type = 0; + + /* Copying list of valid channel into request */ + pno->a_networks[i].channel_count = nd_config->n_channels; + pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold; + + for (j = 0; j < nd_config->n_channels; j++) { + pno->a_networks[i].channels[j] = + nd_config->channels[j]->center_freq; + } + } + + /* set scan to passive if no SSIDs are specified in the request */ + if (nd_config->n_ssids == 0) + pno->do_passive_scan = true; + else + pno->do_passive_scan = false; + + for (i = 0; i < nd_config->n_ssids; i++) { + for (j = 0; j < pno->uc_networks_count; j++) { + if (pno->a_networks[j].ssid.ssid_len == + nd_config->ssids[i].ssid_len && + !memcmp(pno->a_networks[j].ssid.ssid, + nd_config->ssids[i].ssid, + pno->a_networks[j].ssid.ssid_len)) { + pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN; + break; + } + } + } + + if (nd_config->n_scan_plans == 2) { + pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC; + pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations; + pno->slow_scan_period = + nd_config->scan_plans[1].interval * MSEC_PER_SEC; + } else if (nd_config->n_scan_plans == 1) { + pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC; + pno->fast_scan_max_cycles = 1; + pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC; + } else { + ath12k_warn(ar->ab, "Invalid number of PNO scan plans: %d", + nd_config->n_scan_plans); + } + + if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { + /* enable mac randomization */ + pno->enable_pno_scan_randomization = 1; + memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN); + memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN); + } + + pno->delay_start_time = nd_config->delay; + + /* Current FW does not support min-max range for dwell time */ + pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME; + pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME; + + return 0; +} + +static int ath12k_wow_vif_set_wakeups(struct ath12k_link_vif *arvif, + struct cfg80211_wowlan *wowlan) +{ + const struct cfg80211_pkt_pattern *patterns = wowlan->patterns; + struct ath12k *ar = arvif->ar; + unsigned long wow_mask = 0; + int pattern_id = 0; + int ret, i, j; + + /* Setup requested WOW features */ + switch (arvif->ahvif->vdev_type) { + case WMI_VDEV_TYPE_IBSS: + __set_bit(WOW_BEACON_EVENT, &wow_mask); + fallthrough; + case WMI_VDEV_TYPE_AP: + __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask); + __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask); + __set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask); + __set_bit(WOW_AUTH_REQ_EVENT, &wow_mask); + __set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask); + __set_bit(WOW_HTT_EVENT, &wow_mask); + __set_bit(WOW_RA_MATCH_EVENT, &wow_mask); + break; + case WMI_VDEV_TYPE_STA: + if (wowlan->disconnect) { + 
__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask); + __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask); + __set_bit(WOW_BMISS_EVENT, &wow_mask); + __set_bit(WOW_CSA_IE_EVENT, &wow_mask); + } + + if (wowlan->magic_pkt) + __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask); + + if (wowlan->nd_config) { + struct wmi_pno_scan_req_arg *pno; + int ret; + + pno = kzalloc(sizeof(*pno), GFP_KERNEL); + if (!pno) + return -ENOMEM; + + ar->nlo_enabled = true; + + ret = ath12k_wow_pno_check_and_convert(ar, arvif->vdev_id, + wowlan->nd_config, pno); + if (!ret) { + ath12k_wmi_wow_config_pno(ar, arvif->vdev_id, pno); + __set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask); + } + + kfree(pno); + } + break; + default: + break; + } + + for (i = 0; i < wowlan->n_patterns; i++) { + const struct cfg80211_pkt_pattern *eth_pattern = &patterns[i]; + struct ath12k_pkt_pattern new_pattern = {}; + + if (WARN_ON(eth_pattern->pattern_len > WOW_MAX_PATTERN_SIZE)) + return -EINVAL; + + if (ar->ab->wow.wmi_conf_rx_decap_mode == + ATH12K_HW_TXRX_NATIVE_WIFI) { + ath12k_wow_convert_8023_to_80211(ar, eth_pattern, + &new_pattern); + + if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE)) + return -EINVAL; + } else { + memcpy(new_pattern.pattern, eth_pattern->pattern, + eth_pattern->pattern_len); + + /* convert bitmask to bytemask */ + for (j = 0; j < eth_pattern->pattern_len; j++) + if (eth_pattern->mask[j / 8] & BIT(j % 8)) + new_pattern.bytemask[j] = 0xff; + + new_pattern.pattern_len = eth_pattern->pattern_len; + new_pattern.pkt_offset = eth_pattern->pkt_offset; + } + + ret = ath12k_wmi_wow_add_pattern(ar, arvif->vdev_id, + pattern_id, + new_pattern.pattern, + new_pattern.bytemask, + new_pattern.pattern_len, + new_pattern.pkt_offset); + if (ret) { + ath12k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n", + pattern_id, + arvif->vdev_id, ret); + return ret; + } + + pattern_id++; + __set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask); + } + + for (i = 0; i < WOW_EVENT_MAX; i++) { + if (!test_bit(i, &wow_mask)) + continue; + ret = ath12k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1); + if (ret) { + ath12k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n", + wow_wakeup_event(i), arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +static int ath12k_wow_set_wakeups(struct ath12k *ar, + struct cfg80211_wowlan *wowlan) +{ + struct ath12k_link_vif *arvif; + int ret; + + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (ath12k_wow_is_p2p_vdev(arvif->ahvif)) + continue; + ret = ath12k_wow_vif_set_wakeups(arvif, wowlan); + if (ret) { + ath12k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +static int ath12k_wow_vdev_clean_nlo(struct ath12k *ar, u32 vdev_id) +{ + struct wmi_pno_scan_req_arg *pno; + int ret; + + if (!ar->nlo_enabled) + return 0; + + pno = kzalloc(sizeof(*pno), GFP_KERNEL); + if (!pno) + return -ENOMEM; + + pno->enable = 0; + ret = ath12k_wmi_wow_config_pno(ar, vdev_id, pno); + if (ret) { + ath12k_warn(ar->ab, "failed to disable PNO: %d", ret); + goto out; + } + + ar->nlo_enabled = false; + +out: + kfree(pno); + return ret; +} + +static int ath12k_wow_vif_clean_nlo(struct ath12k_link_vif *arvif) +{ + struct ath12k *ar = arvif->ar; + + switch (arvif->ahvif->vdev_type) { + case WMI_VDEV_TYPE_STA: + return ath12k_wow_vdev_clean_nlo(ar, arvif->vdev_id); + default: + return 0; + } +} + +static int ath12k_wow_nlo_cleanup(struct ath12k *ar) +{ + struct ath12k_link_vif *arvif; 
+ int ret; + + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (ath12k_wow_is_p2p_vdev(arvif->ahvif)) + continue; + + ret = ath12k_wow_vif_clean_nlo(arvif); + if (ret) { + ath12k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +static int ath12k_wow_set_hw_filter(struct ath12k *ar) +{ + struct wmi_hw_data_filter_arg arg; + struct ath12k_link_vif *arvif; + int ret; + + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA) + continue; + + arg.vdev_id = arvif->vdev_id; + arg.enable = true; + arg.hw_filter_bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC; + ret = ath12k_wmi_hw_data_filter_cmd(ar, &arg); + if (ret) { + ath12k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +static int ath12k_wow_clear_hw_filter(struct ath12k *ar) +{ + struct wmi_hw_data_filter_arg arg; + struct ath12k_link_vif *arvif; + int ret; + + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA) + continue; + + arg.vdev_id = arvif->vdev_id; + arg.enable = false; + arg.hw_filter_bitmap = 0; + ret = ath12k_wmi_hw_data_filter_cmd(ar, &arg); + + if (ret) { + ath12k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +static void ath12k_wow_generate_ns_mc_addr(struct ath12k_base *ab, + struct wmi_arp_ns_offload_arg *offload) +{ + int i; + + for (i = 0; i < offload->ipv6_count; i++) { + offload->self_ipv6_addr[i][0] = 0xff; + offload->self_ipv6_addr[i][1] = 0x02; + offload->self_ipv6_addr[i][11] = 0x01; + offload->self_ipv6_addr[i][12] = 0xff; + offload->self_ipv6_addr[i][13] = + offload->ipv6_addr[i][13]; + offload->self_ipv6_addr[i][14] = + offload->ipv6_addr[i][14]; + offload->self_ipv6_addr[i][15] = + offload->ipv6_addr[i][15]; + ath12k_dbg(ab, ATH12K_DBG_WOW, "NS solicited addr %pI6\n", + offload->self_ipv6_addr[i]); + } +} + +static void ath12k_wow_prepare_ns_offload(struct ath12k_link_vif *arvif, + struct wmi_arp_ns_offload_arg *offload) +{ + struct net_device *ndev = ieee80211_vif_to_wdev(arvif->ahvif->vif)->netdev; + struct ath12k_base *ab = arvif->ar->ab; + struct inet6_ifaddr *ifa6; + struct ifacaddr6 *ifaca6; + struct inet6_dev *idev; + u32 count = 0, scope; + + if (!ndev) + return; + + idev = in6_dev_get(ndev); + if (!idev) + return; + + ath12k_dbg(ab, ATH12K_DBG_WOW, "wow prepare ns offload\n"); + + read_lock_bh(&idev->lock); + + /* get unicast address */ + list_for_each_entry(ifa6, &idev->addr_list, if_list) { + if (count >= WMI_IPV6_MAX_COUNT) + goto unlock; + + if (ifa6->flags & IFA_F_DADFAILED) + continue; + + scope = ipv6_addr_src_scope(&ifa6->addr); + if (scope != IPV6_ADDR_SCOPE_LINKLOCAL && + scope != IPV6_ADDR_SCOPE_GLOBAL) { + ath12k_dbg(ab, ATH12K_DBG_WOW, + "Unsupported ipv6 scope: %d\n", scope); + continue; + } + + memcpy(offload->ipv6_addr[count], &ifa6->addr.s6_addr, + sizeof(ifa6->addr.s6_addr)); + offload->ipv6_type[count] = WMI_IPV6_UC_TYPE; + ath12k_dbg(ab, ATH12K_DBG_WOW, "mac count %d ipv6 uc %pI6 scope %d\n", + count, offload->ipv6_addr[count], + scope); + count++; + } + + /* get anycast address */ + rcu_read_lock(); + + for (ifaca6 = rcu_dereference(idev->ac_list); ifaca6; + ifaca6 = 
rcu_dereference(ifaca6->aca_next)) { + if (count >= WMI_IPV6_MAX_COUNT) { + rcu_read_unlock(); + goto unlock; + } + + scope = ipv6_addr_src_scope(&ifaca6->aca_addr); + if (scope != IPV6_ADDR_SCOPE_LINKLOCAL && + scope != IPV6_ADDR_SCOPE_GLOBAL) { + ath12k_dbg(ab, ATH12K_DBG_WOW, + "Unsupported ipv6 scope: %d\n", scope); + continue; + } + + memcpy(offload->ipv6_addr[count], &ifaca6->aca_addr, + sizeof(ifaca6->aca_addr)); + offload->ipv6_type[count] = WMI_IPV6_AC_TYPE; + ath12k_dbg(ab, ATH12K_DBG_WOW, "mac count %d ipv6 ac %pI6 scope %d\n", + count, offload->ipv6_addr[count], + scope); + count++; + } + + rcu_read_unlock(); + +unlock: + read_unlock_bh(&idev->lock); + + in6_dev_put(idev); + + offload->ipv6_count = count; + ath12k_wow_generate_ns_mc_addr(ab, offload); +} + +static void ath12k_wow_prepare_arp_offload(struct ath12k_link_vif *arvif, + struct wmi_arp_ns_offload_arg *offload) +{ + struct ieee80211_vif *vif = arvif->ahvif->vif; + struct ieee80211_vif_cfg vif_cfg = vif->cfg; + struct ath12k_base *ab = arvif->ar->ab; + u32 ipv4_cnt; + + ath12k_dbg(ab, ATH12K_DBG_WOW, "wow prepare arp offload\n"); + + ipv4_cnt = min(vif_cfg.arp_addr_cnt, WMI_IPV4_MAX_COUNT); + memcpy(offload->ipv4_addr, vif_cfg.arp_addr_list, ipv4_cnt * sizeof(u32)); + offload->ipv4_count = ipv4_cnt; + + ath12k_dbg(ab, ATH12K_DBG_WOW, + "wow arp_addr_cnt %d vif->addr %pM, offload_addr %pI4\n", + vif_cfg.arp_addr_cnt, vif->addr, offload->ipv4_addr); +} + +static int ath12k_wow_arp_ns_offload(struct ath12k *ar, bool enable) +{ + struct wmi_arp_ns_offload_arg *offload; + struct ath12k_link_vif *arvif; + struct ath12k_vif *ahvif; + int ret; + + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + offload = kmalloc(sizeof(*offload), GFP_KERNEL); + if (!offload) + return -ENOMEM; + + list_for_each_entry(arvif, &ar->arvifs, list) { + ahvif = arvif->ahvif; + + if (ahvif->vdev_type != WMI_VDEV_TYPE_STA) + continue; + + memset(offload, 0, sizeof(*offload)); + + memcpy(offload->mac_addr, ahvif->vif->addr, ETH_ALEN); + ath12k_wow_prepare_ns_offload(arvif, offload); + ath12k_wow_prepare_arp_offload(arvif, offload); + + ret = ath12k_wmi_arp_ns_offload(ar, arvif, offload, enable); + if (ret) { + ath12k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n", + arvif->vdev_id, enable, ret); + kfree(offload); + return ret; + } + } + + kfree(offload); + + return 0; +} + +static int ath12k_gtk_rekey_offload(struct ath12k *ar, bool enable) +{ + struct ath12k_link_vif *arvif; + int ret; + + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA || + !arvif->is_up || + !arvif->rekey_data.enable_offload) + continue; + + /* get rekey info before disabling rekey offload */ + if (!enable) { + ret = ath12k_wmi_gtk_rekey_getinfo(ar, arvif); + if (ret) { + ath12k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + ret = ath12k_wmi_gtk_rekey_offload(ar, arvif, enable); + + if (ret) { + ath12k_warn(ar->ab, "failed to offload gtk rekey vdev %i: enable %d, ret %d\n", + arvif->vdev_id, enable, ret); + return ret; + } + } + + return 0; +} + +static int ath12k_wow_protocol_offload(struct ath12k *ar, bool enable) +{ + int ret; + + ret = ath12k_wow_arp_ns_offload(ar, enable); + if (ret) { + ath12k_warn(ar->ab, "failed to offload ARP and NS %d %d\n", + enable, ret); + return ret; + } + + ret = ath12k_gtk_rekey_offload(ar, enable); + if (ret) { + ath12k_warn(ar->ab, "failed to offload gtk rekey %d %d\n", + 
enable, ret); + return ret; + } + + return 0; +} + +static int ath12k_wow_set_keepalive(struct ath12k *ar, + enum wmi_sta_keepalive_method method, + u32 interval) +{ + struct ath12k_link_vif *arvif; + int ret; + + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + list_for_each_entry(arvif, &ar->arvifs, list) { + ret = ath12k_mac_vif_set_keepalive(arvif, method, interval); + if (ret) + return ret; + } + + return 0; +} + +int ath12k_wow_op_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan) +{ + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k *ar = ath12k_ah_to_ar(ah, 0); + int ret; + + lockdep_assert_wiphy(hw->wiphy); + + ret = ath12k_wow_cleanup(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to clear wow wakeup events: %d\n", + ret); + goto exit; + } + + ret = ath12k_wow_set_wakeups(ar, wowlan); + if (ret) { + ath12k_warn(ar->ab, "failed to set wow wakeup events: %d\n", + ret); + goto cleanup; + } + + ret = ath12k_wow_protocol_offload(ar, true); + if (ret) { + ath12k_warn(ar->ab, "failed to set wow protocol offload events: %d\n", + ret); + goto cleanup; + } + + ret = ath12k_mac_wait_tx_complete(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to wait tx complete: %d\n", ret); + goto cleanup; + } + + ret = ath12k_wow_set_hw_filter(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to set hw filter: %d\n", + ret); + goto cleanup; + } + + ret = ath12k_wow_set_keepalive(ar, + WMI_STA_KEEPALIVE_METHOD_NULL_FRAME, + WMI_STA_KEEPALIVE_INTERVAL_DEFAULT); + if (ret) { + ath12k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret); + goto cleanup; + } + + ret = ath12k_wow_enable(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to start wow: %d\n", ret); + goto cleanup; + } + + ath12k_hif_irq_disable(ar->ab); + ath12k_hif_ce_irq_disable(ar->ab); + + ret = ath12k_hif_suspend(ar->ab); + if (ret) { + ath12k_warn(ar->ab, "failed to suspend hif: %d\n", ret); + goto wakeup; + } + + goto exit; + +wakeup: + ath12k_wow_wakeup(ar); + +cleanup: + ath12k_wow_cleanup(ar); + +exit: + return ret ? 
1 : 0; +} + +void ath12k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled) +{ + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k *ar = ath12k_ah_to_ar(ah, 0); + + lockdep_assert_wiphy(hw->wiphy); + + device_set_wakeup_enable(ar->ab->dev, enabled); +} + +int ath12k_wow_op_resume(struct ieee80211_hw *hw) +{ + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k *ar = ath12k_ah_to_ar(ah, 0); + int ret; + + lockdep_assert_wiphy(hw->wiphy); + + ret = ath12k_hif_resume(ar->ab); + if (ret) { + ath12k_warn(ar->ab, "failed to resume hif: %d\n", ret); + goto exit; + } + + ath12k_hif_ce_irq_enable(ar->ab); + ath12k_hif_irq_enable(ar->ab); + + ret = ath12k_wow_wakeup(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret); + goto exit; + } + + ret = ath12k_wow_nlo_cleanup(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret); + goto exit; + } + + ret = ath12k_wow_clear_hw_filter(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to clear hw filter: %d\n", ret); + goto exit; + } + + ret = ath12k_wow_protocol_offload(ar, false); + if (ret) { + ath12k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n", + ret); + goto exit; + } + + ret = ath12k_wow_set_keepalive(ar, + WMI_STA_KEEPALIVE_METHOD_NULL_FRAME, + WMI_STA_KEEPALIVE_INTERVAL_DISABLE); + if (ret) { + ath12k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret); + goto exit; + } + +exit: + if (ret) { + switch (ah->state) { + case ATH12K_HW_STATE_ON: + ah->state = ATH12K_HW_STATE_RESTARTING; + ret = 1; + break; + case ATH12K_HW_STATE_OFF: + case ATH12K_HW_STATE_RESTARTING: + case ATH12K_HW_STATE_RESTARTED: + case ATH12K_HW_STATE_WEDGED: + ath12k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n", + ah->state); + ret = -EIO; + break; + } + } + + return ret; +} + +int ath12k_wow_init(struct ath12k *ar) +{ + if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map)) + return 0; + + ar->wow.wowlan_support = ath12k_wowlan_support; + + if (ar->ab->wow.wmi_conf_rx_decap_mode == ATH12K_HW_TXRX_NATIVE_WIFI) { + ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE; + ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE; + } + + if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) { + ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT; + ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS; + } + + ar->wow.max_num_patterns = ATH12K_WOW_PATTERNS; + ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns; + ar->ah->hw->wiphy->wowlan = &ar->wow.wowlan_support; + + device_set_wakeup_capable(ar->ab->dev, true); + + return 0; +} diff --git a/wow.h b/wow.h new file mode 100644 index 000000000000..af9be5fadcc3 --- /dev/null +++ b/wow.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef ATH12K_WOW_H +#define ATH12K_WOW_H + +#define ATH12K_WOW_RETRY_NUM 10 +#define ATH12K_WOW_RETRY_WAIT_MS 200 +#define ATH12K_WOW_PATTERNS 22 + +struct ath12k_wow { + u32 max_num_patterns; + struct completion wakeup_completed; + struct wiphy_wowlan_support wowlan_support; +}; + +struct ath12k_pkt_pattern { + u8 pattern[WOW_MAX_PATTERN_SIZE]; + u8 bytemask[WOW_MAX_PATTERN_SIZE]; + int pattern_len; + int pkt_offset; +}; + +struct rfc1042_hdr { + u8 llc_dsap; + u8 llc_ssap; + u8 llc_ctrl; + u8 snap_oui[3]; + __be16 eth_type; +} __packed; + +#ifdef CONFIG_PM + +int ath12k_wow_init(struct ath12k *ar); +int ath12k_wow_op_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan); +int ath12k_wow_op_resume(struct ieee80211_hw *hw); +void ath12k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled); +int ath12k_wow_enable(struct ath12k *ar); +int ath12k_wow_wakeup(struct ath12k *ar); + +#else + +static inline int ath12k_wow_init(struct ath12k *ar) +{ + return 0; +} + +static inline int ath12k_wow_enable(struct ath12k *ar) +{ + return 0; +} + +static inline int ath12k_wow_wakeup(struct ath12k *ar) +{ + return 0; +} +#endif /* CONFIG_PM */ +#endif /* ATH12K_WOW_H */
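
A few illustrative sketches for the code above follow. First, the C2S macro used by wow_wakeup_event() and wow_reason() in wmi.h is the standard enum-stringification idiom; the standalone program below (hypothetical enum and names, not driver code) shows the pattern in isolation:

#include <stdio.h>

enum demo_event { DEMO_FOO, DEMO_BAR, DEMO_MAX };

/* Expand each case into 'case X: return "X";' so the switch body
 * stays a one-line-per-value lookup table.
 */
#define C2S(x) case x: return #x

static const char *demo_event_name(enum demo_event ev)
{
	switch (ev) {
	C2S(DEMO_FOO);
	C2S(DEMO_BAR);
	C2S(DEMO_MAX);
	default:
		return NULL;
	}
}

int main(void)
{
	printf("%s\n", demo_event_name(DEMO_BAR)); /* prints DEMO_BAR */
	return 0;
}

The #undef C2S after the last user keeps the short macro name from leaking into the rest of the header.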
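Next, the pattern-size arithmetic. In native-wifi decap mode ath12k_wow_init() shrinks pattern_max_len and max_pkt_offset by WOW_MAX_REDUCE because ath12k_wow_convert_8023_to_80211() rebases Ethernet-frame patterns onto an 802.11 3-address frame: bytes 0-5 land at addr1 (offset 4), bytes 6-11 at addr3 (offset 16), and the EtherType at the LLC/SNAP eth_type field (offset 30). A minimal sketch, using simplified stand-in structs whose layouts are assumed to match the driver's, reproduces the numbers behind WOW_HDR_LEN (32) and WOW_MAX_REDUCE (14):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures (assumed packed layouts). */
struct ieee80211_hdr_3addr {
	uint16_t frame_control;
	uint16_t duration_id;
	uint8_t addr1[6], addr2[6], addr3[6];
	uint16_t seq_ctrl;
} __attribute__((packed));		/* 24 bytes */

struct rfc1042_hdr {
	uint8_t llc_dsap, llc_ssap, llc_ctrl;
	uint8_t snap_oui[3];
	uint16_t eth_type;
} __attribute__((packed));		/* 8 bytes */

struct ethhdr {
	uint8_t h_dest[6], h_source[6];
	uint16_t h_proto;
} __attribute__((packed));		/* 14 bytes */

int main(void)
{
	/* WOW_HDR_LEN: full 802.11 3-addr header plus LLC/SNAP. */
	size_t wow_hdr_len = sizeof(struct ieee80211_hdr_3addr) +
			     sizeof(struct rfc1042_hdr);

	/* WOW_MAX_REDUCE: extra pattern bytes the 802.11 form can
	 * consume, given the pattern window may start at addr1.
	 */
	size_t wow_max_reduce = wow_hdr_len - sizeof(struct ethhdr) -
				offsetof(struct ieee80211_hdr_3addr, addr1);

	printf("WOW_HDR_LEN    = %zu\n", wow_hdr_len);    /* 32 */
	printf("WOW_MAX_REDUCE = %zu\n", wow_max_reduce); /* 14 */
	return 0;
}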
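The scan-plan mapping in ath12k_wow_pno_check_and_convert() can be read as: with two cfg80211 plans the first supplies the fast period and its iteration count and the second the slow period; with a single plan one fast cycle runs and then the same interval repeats as the slow period (the driver only warns on any other count, since cfg80211 guarantees at least one plan). A condensed standalone sketch of just that mapping, with field names loosely borrowed from the driver:

#include <stdio.h>

#define MSEC_PER_SEC 1000L

struct plan { long interval_s; int iterations; };
struct pno_periods { long fast_ms; int fast_cycles; long slow_ms; };

/* Mirror of the two-plan/one-plan mapping in
 * ath12k_wow_pno_check_and_convert() (sketch, not driver code).
 */
static int map_scan_plans(const struct plan *plans, int n,
			  struct pno_periods *out)
{
	if (n == 2) {
		out->fast_ms = plans[0].interval_s * MSEC_PER_SEC;
		out->fast_cycles = plans[0].iterations;
		out->slow_ms = plans[1].interval_s * MSEC_PER_SEC;
	} else if (n == 1) {
		out->fast_ms = plans[0].interval_s * MSEC_PER_SEC;
		out->fast_cycles = 1;
		out->slow_ms = out->fast_ms;
	} else {
		return -1; /* the driver only warns here */
	}
	return 0;
}

int main(void)
{
	struct plan plans[2] = { { 10, 3 }, { 300, 0 } };
	struct pno_periods p;

	if (!map_scan_plans(plans, 2, &p))
		printf("fast %ld ms x%d, then slow %ld ms\n",
		       p.fast_ms, p.fast_cycles, p.slow_ms);
	return 0;
}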
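Finally, ath12k_wow_generate_ns_mc_addr() builds the RFC 4291 solicited-node multicast address ff02::1:ffXX:XXXX from the low 24 bits of each offloaded unicast/anycast address, so the firmware can match Neighbor Solicitations while the host sleeps. A minimal userspace sketch of the same derivation (hypothetical helper, not driver code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Derive the solicited-node multicast address (RFC 4291, sec. 2.7.1):
 * ff02::1:ff followed by the last three bytes of the unicast address.
 */
static void solicited_node_mcast(const uint8_t uc[16], uint8_t mc[16])
{
	memset(mc, 0, 16);
	mc[0] = 0xff;
	mc[1] = 0x02;
	mc[11] = 0x01;
	mc[12] = 0xff;
	mc[13] = uc[13];
	mc[14] = uc[14];
	mc[15] = uc[15];
}

int main(void)
{
	/* fe80::211:22ff:fe33:4455 -> ff02::1:ff33:4455 */
	uint8_t uc[16] = { 0xfe, 0x80, 0, 0, 0, 0, 0, 0,
			   0x02, 0x11, 0x22, 0xff, 0xfe, 0x33, 0x44, 0x55 };
	uint8_t mc[16];
	int i;

	solicited_node_mcast(uc, mc);
	for (i = 0; i < 16; i += 2)
		printf("%02x%02x%s", mc[i], mc[i + 1], i < 14 ? ":" : "\n");
	return 0;
}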
