Diffstat (limited to 'sys/contrib/dev/iwlwifi/pcie')
-rw-r--r--  sys/contrib/dev/iwlwifi/pcie/ctxt-info-v2.c (renamed from sys/contrib/dev/iwlwifi/pcie/ctxt-info-gen3.c) |  213
-rw-r--r--  sys/contrib/dev/iwlwifi/pcie/ctxt-info.c |   29
-rw-r--r--  sys/contrib/dev/iwlwifi/pcie/drv.c | 2503
-rw-r--r--  sys/contrib/dev/iwlwifi/pcie/gen1_2/internal.h (renamed from sys/contrib/dev/iwlwifi/pcie/internal.h) |  188
-rw-r--r--  sys/contrib/dev/iwlwifi/pcie/gen1_2/rx.c (renamed from sys/contrib/dev/iwlwifi/pcie/rx.c) |  223
-rw-r--r--  sys/contrib/dev/iwlwifi/pcie/gen1_2/trans-gen2.c (renamed from sys/contrib/dev/iwlwifi/pcie/trans-gen2.c) |  209
-rw-r--r--  sys/contrib/dev/iwlwifi/pcie/gen1_2/trans.c (renamed from sys/contrib/dev/iwlwifi/pcie/trans.c) | 1216
-rw-r--r--  sys/contrib/dev/iwlwifi/pcie/gen1_2/tx-gen2.c (renamed from sys/contrib/dev/iwlwifi/pcie/tx-gen2.c) |   88
-rw-r--r--  sys/contrib/dev/iwlwifi/pcie/gen1_2/tx.c (renamed from sys/contrib/dev/iwlwifi/pcie/tx.c) |  291
-rw-r--r--  sys/contrib/dev/iwlwifi/pcie/iwl-context-info-v2.h |  344
-rw-r--r--  sys/contrib/dev/iwlwifi/pcie/iwl-context-info.h |  197
-rw-r--r--  sys/contrib/dev/iwlwifi/pcie/utils.c |  128
-rw-r--r--  sys/contrib/dev/iwlwifi/pcie/utils.h |   40
13 files changed, 3341 insertions, 2328 deletions
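Two changes in the diff below benefit from a closer reading.

First, iwl_pcie_ctxt_info_v2_alloc() no longer advertises a fixed prph-scratch size: a BUILD_BUG_ON pins the FSEQ image as the tail of the structure, and unless IWL_PRPH_SCRATCH_EXT_EXT_FSEQ is set, the size reported to firmware stops at offsetofend(..., dram.common), so the FSEQ tail stays invisible to firmware that does not expect it. Here is a stand-alone sketch of that sizing trick; prph_scratch_sim and its members are invented for illustration (the real layout lives in iwl-context-info-v2.h), and offsetofend() mirrors the kernel helper:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Invented stand-in for the real iwl_prph_scratch; the only property
 * that matters is that the FSEQ image is the last member, which is
 * what the BUILD_BUG_ON in iwl_pcie_ctxt_info_v2_alloc() asserts. */
struct dram_common { uint64_t fw_base; uint64_t paging_base; };
struct dram_fseq { uint64_t img_base; uint32_t img_size; uint32_t rsvd; };

struct prph_scratch_sim {
	uint32_t control_flags;
	uint32_t control_flags_ext;
	struct {
		struct dram_common common;
		struct dram_fseq fseq_img;	/* must stay last */
	} dram;
};

/* Same definition the kernel provides in <linux/stddef.h>. */
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

int main(void)
{
	/* Mirrors the IWL_PRPH_SCRATCH_EXT_EXT_FSEQ test in the diff:
	 * advertise the full structure only when the extended-FSEQ flag
	 * is set, otherwise stop after the common DRAM section. */
	for (int ext_fseq = 0; ext_fseq <= 1; ext_fseq++) {
		size_t size = ext_fseq ?
			sizeof(struct prph_scratch_sim) :
			offsetofend(struct prph_scratch_sim, dram.common);
		printf("ext_fseq=%d -> prph_scratch_size=%zu\n",
		       ext_fseq, size);
	}
	return 0;
}

Second, drv.c deletes TRANS_CFG_MARKER: with every PCI table entry now pointing at a struct iwl_mac_cfg, _TRANS_CFG_CHECK() only needs to reject any other type at build time. A minimal user-space approximation follows (it assumes the GCC/Clang builtins used by the kernel macro; struct iwl_mac_cfg is reduced to a stub and kernel_ulong_t is replaced by unsigned long):

#include <stdio.h>

/* Reduced stand-in for the cfg struct; only its type identity matters. */
struct iwl_mac_cfg { int device_family; };

/* Deliberately never defined: if the chosen expression references it,
 * the build fails with an undefined reference to `_invalid_type'. */
extern int _invalid_type;

#define _IS_A(cfg, _struct) __builtin_types_compatible_p(typeof(cfg),	\
							 struct _struct)
#define _TRANS_CFG_CHECK(cfg)						\
	(__builtin_choose_expr(_IS_A(cfg, iwl_mac_cfg),			\
			       0, _invalid_type))
#define _ASSIGN_CFG(cfg) (_TRANS_CFG_CHECK(cfg) + (unsigned long)&(cfg))

static const struct iwl_mac_cfg iwl9000_mac_cfg = { .device_family = 9 };

int main(void)
{
	/* Accepted: iwl9000_mac_cfg really is a struct iwl_mac_cfg
	 * (__builtin_types_compatible_p ignores the const qualifier). */
	unsigned long driver_data = _ASSIGN_CFG(iwl9000_mac_cfg);

	printf("driver_data = %#lx\n", driver_data);

	/* Rejected at build time: passing, say, a plain int selects
	 * _invalid_type as the result and the link fails. */
	return 0;
}

Because __builtin_choose_expr discards the unchosen operand, _invalid_type is only referenced when a wrong-typed cfg slips into the table, turning the mistake into a build failure. And since the marker bit is gone, driver_data is now just the cfg pointer, presumably sparing the probe path from stripping a tag bit before dereferencing it.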
| diff --git a/sys/contrib/dev/iwlwifi/pcie/ctxt-info-gen3.c b/sys/contrib/dev/iwlwifi/pcie/ctxt-info-v2.c index ae93a72542b2..06be929a3ca5 100644 --- a/sys/contrib/dev/iwlwifi/pcie/ctxt-info-gen3.c +++ b/sys/contrib/dev/iwlwifi/pcie/ctxt-info-v2.c @@ -1,12 +1,12 @@  // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause  /* - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation   */  #include <linux/dmi.h>  #include "iwl-trans.h"  #include "iwl-fh.h" -#include "iwl-context-info-gen3.h" -#include "internal.h" +#include "iwl-context-info-v2.h" +#include "gen1_2/internal.h"  #include "iwl-prph.h"  static const struct dmi_system_id dmi_force_scu_active_approved_list[] = { @@ -97,20 +97,22 @@ out:  		*control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags;  } -int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, -				 const struct fw_img *fw) +int iwl_pcie_ctxt_info_v2_alloc(struct iwl_trans *trans, +				const struct iwl_fw *fw, +				const struct fw_img *img)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -	struct iwl_context_info_gen3 *ctxt_info_gen3; +	struct iwl_context_info_v2 *ctxt_info_v2;  	struct iwl_prph_scratch *prph_scratch;  	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;  	struct iwl_prph_info *prph_info;  	u32 control_flags = 0; +	u32 control_flags_ext = 0;  	int ret;  	int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE, -			      trans->cfg->min_txq_size); +			      trans->mac_cfg->base->min_txq_size); -	switch (trans_pcie->rx_buf_size) { +	switch (trans->conf.rx_buf_size) {  	case IWL_AMSDU_DEF:  		return -EINVAL;  	case IWL_AMSDU_2K: @@ -130,6 +132,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,  		break;  	} +	if (trans->conf.dsbr_urm_fw_dependent) +		control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_FW; + +	if (trans->conf.dsbr_urm_permanent) +		control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_PERM; + +	if (trans->conf.ext_32khz_clock_valid) +		control_flags_ext |= IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID; +  	/* Allocate prph scratch */  	prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),  					  &trans_pcie->prph_scratch_dma_addr, @@ -141,16 +152,16 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,  	prph_sc_ctrl->version.version = 0;  	prph_sc_ctrl->version.mac_id = -		cpu_to_le16((u16)trans->hw_rev); +		cpu_to_le16((u16)trans->info.hw_rev);  	prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);  	control_flags |= IWL_PRPH_SCRATCH_MTR_MODE;  	control_flags |= IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT; -	if (trans->trans_cfg->imr_enabled) +	if (trans->mac_cfg->imr_enabled)  		control_flags |= IWL_PRPH_SCRATCH_IMR_DEBUG_EN; -	if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL && +	if (CSR_HW_REV_TYPE(trans->info.hw_rev) == IWL_CFG_MAC_TYPE_GL &&  	    iwl_is_force_scu_active_approved()) {  		control_flags |= IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE;  		IWL_DEBUG_FW(trans, @@ -158,6 +169,11 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,  			     IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE);  	} +	if (trans->do_top_reset) { +		WARN_ON(trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC); +		control_flags |= IWL_PRPH_SCRATCH_TOP_RESET; +	} +  	/* initialize RX default queue */  	prph_sc_ctrl->rbd_cfg.free_rbd_addr =  		cpu_to_le64(trans_pcie->rxq->bd_dma); @@ -165,17 +181,19 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,  	iwl_pcie_ctxt_info_dbg_enable(trans, &prph_sc_ctrl->hwm_cfg,  				      &control_flags);  	
prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags); +	prph_sc_ctrl->control.control_flags_ext = cpu_to_le32(control_flags_ext);  	/* initialize the Step equalizer data */ -	prph_sc_ctrl->step_cfg.mbx_addr_0 = cpu_to_le32(trans->mbx_addr_0_step); -	prph_sc_ctrl->step_cfg.mbx_addr_1 = cpu_to_le32(trans->mbx_addr_1_step); +	prph_sc_ctrl->step_cfg.mbx_addr_0 = +		cpu_to_le32(trans->conf.mbx_addr_0_step); +	prph_sc_ctrl->step_cfg.mbx_addr_1 = +		cpu_to_le32(trans->conf.mbx_addr_1_step);  	/* allocate ucode sections in dram and set addresses */ -	ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram); +	ret = iwl_pcie_init_fw_sec(trans, img, &prph_scratch->dram.common);  	if (ret)  		goto err_free_prph_scratch; -  	/* Allocate prph information  	 * currently we don't assign to the prph info anything, but it would get  	 * assigned later @@ -195,42 +213,58 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,  	}  	/* Allocate context info */ -	ctxt_info_gen3 = dma_alloc_coherent(trans->dev, -					    sizeof(*ctxt_info_gen3), -					    &trans_pcie->ctxt_info_dma_addr, -					    GFP_KERNEL); -	if (!ctxt_info_gen3) { +	ctxt_info_v2 = dma_alloc_coherent(trans->dev, +					  sizeof(*ctxt_info_v2), +					  &trans_pcie->ctxt_info_dma_addr, +					  GFP_KERNEL); +	if (!ctxt_info_v2) {  		ret = -ENOMEM;  		goto err_free_prph_info;  	} -	ctxt_info_gen3->prph_info_base_addr = +	ctxt_info_v2->prph_info_base_addr =  		cpu_to_le64(trans_pcie->prph_info_dma_addr); -	ctxt_info_gen3->prph_scratch_base_addr = +	ctxt_info_v2->prph_scratch_base_addr =  		cpu_to_le64(trans_pcie->prph_scratch_dma_addr); -	ctxt_info_gen3->prph_scratch_size = -		cpu_to_le32(sizeof(*prph_scratch)); -	ctxt_info_gen3->cr_head_idx_arr_base_addr = + +	/* +	 * This code assumes the FSEQ is last and we can make that +	 * optional; old devices _should_ be fine with a bigger size, +	 * but in simulation we check the size more precisely. 
+	 */ +	BUILD_BUG_ON(offsetofend(typeof(*prph_scratch), dram.common) + +		     sizeof(prph_scratch->dram.fseq_img) != +		     sizeof(*prph_scratch)); +	if (control_flags_ext & IWL_PRPH_SCRATCH_EXT_EXT_FSEQ) +		ctxt_info_v2->prph_scratch_size = +			cpu_to_le32(sizeof(*prph_scratch)); +	else +		ctxt_info_v2->prph_scratch_size = +			cpu_to_le32(offsetofend(typeof(*prph_scratch), +						dram.common)); + +	ctxt_info_v2->cr_head_idx_arr_base_addr =  		cpu_to_le64(trans_pcie->rxq->rb_stts_dma); -	ctxt_info_gen3->tr_tail_idx_arr_base_addr = +	ctxt_info_v2->tr_tail_idx_arr_base_addr =  		cpu_to_le64(trans_pcie->prph_info_dma_addr + PAGE_SIZE / 2); -	ctxt_info_gen3->cr_tail_idx_arr_base_addr = +	ctxt_info_v2->cr_tail_idx_arr_base_addr =  		cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4); -	ctxt_info_gen3->mtr_base_addr = -		cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr); -	ctxt_info_gen3->mcr_base_addr = +	ctxt_info_v2->mtr_base_addr = +		cpu_to_le64(trans_pcie->txqs.txq[trans->conf.cmd_queue]->dma_addr); +	ctxt_info_v2->mcr_base_addr =  		cpu_to_le64(trans_pcie->rxq->used_bd_dma); -	ctxt_info_gen3->mtr_size = +	ctxt_info_v2->mtr_size =  		cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size)); -	ctxt_info_gen3->mcr_size = -		cpu_to_le16(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds)); +	ctxt_info_v2->mcr_size = +		cpu_to_le16(RX_QUEUE_CB_SIZE(iwl_trans_get_num_rbds(trans))); -	trans_pcie->ctxt_info_gen3 = ctxt_info_gen3; +	trans_pcie->ctxt_info_v2 = ctxt_info_v2;  	trans_pcie->prph_info = prph_info;  	trans_pcie->prph_scratch = prph_scratch;  	/* Allocate IML */ -	trans_pcie->iml = dma_alloc_coherent(trans->dev, trans->iml_len, +	trans_pcie->iml_len = fw->iml_len; +	trans_pcie->iml = dma_alloc_coherent(trans->dev, fw->iml_len,  					     &trans_pcie->iml_dma_addr,  					     GFP_KERNEL);  	if (!trans_pcie->iml) { @@ -238,27 +272,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,  		goto err_free_ctxt_info;  	} -	memcpy(trans_pcie->iml, trans->iml, trans->iml_len); - -	iwl_enable_fw_load_int_ctx_info(trans); - -	/* kick FW self load */ -	iwl_write64(trans, CSR_CTXT_INFO_ADDR, -		    trans_pcie->ctxt_info_dma_addr); -	iwl_write64(trans, CSR_IML_DATA_ADDR, -		    trans_pcie->iml_dma_addr); -	iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len); - -	iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, -		    CSR_AUTO_FUNC_BOOT_ENA); +	memcpy(trans_pcie->iml, fw->iml, fw->iml_len);  	return 0;  err_free_ctxt_info: -	dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3), -			  trans_pcie->ctxt_info_gen3, +	dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_v2), +			  trans_pcie->ctxt_info_v2,  			  trans_pcie->ctxt_info_dma_addr); -	trans_pcie->ctxt_info_gen3 = NULL; +	trans_pcie->ctxt_info_v2 = NULL;  err_free_prph_info:  	dma_free_coherent(trans->dev, PAGE_SIZE, prph_info,  			  trans_pcie->prph_info_dma_addr); @@ -272,14 +294,31 @@ err_free_prph_scratch:  } -void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive) +void iwl_pcie_ctxt_info_v2_kick(struct iwl_trans *trans) +{ +	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + +	iwl_enable_fw_load_int_ctx_info(trans, trans->do_top_reset); + +	/* kick FW self load */ +	iwl_write64(trans, CSR_CTXT_INFO_ADDR, trans_pcie->ctxt_info_dma_addr); +	iwl_write64(trans, CSR_IML_DATA_ADDR, trans_pcie->iml_dma_addr); +	iwl_write32(trans, CSR_IML_SIZE_ADDR, trans_pcie->iml_len); + +	iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, +		    CSR_AUTO_FUNC_BOOT_ENA); +} + +void 
iwl_pcie_ctxt_info_v2_free(struct iwl_trans *trans, bool alive)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	if (trans_pcie->iml) { -		dma_free_coherent(trans->dev, trans->iml_len, trans_pcie->iml, +		dma_free_coherent(trans->dev, trans_pcie->iml_len, +				  trans_pcie->iml,  				  trans_pcie->iml_dma_addr);  		trans_pcie->iml_dma_addr = 0; +		trans_pcie->iml_len = 0;  		trans_pcie->iml = NULL;  	} @@ -288,15 +327,15 @@ void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive)  	if (alive)  		return; -	if (!trans_pcie->ctxt_info_gen3) +	if (!trans_pcie->ctxt_info_v2)  		return; -	/* ctxt_info_gen3 and prph_scratch are still needed for PNVM load */ -	dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3), -			  trans_pcie->ctxt_info_gen3, +	/* ctxt_info_v2 and prph_scratch are still needed for PNVM load */ +	dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_v2), +			  trans_pcie->ctxt_info_v2,  			  trans_pcie->ctxt_info_dma_addr);  	trans_pcie->ctxt_info_dma_addr = 0; -	trans_pcie->ctxt_info_gen3 = NULL; +	trans_pcie->ctxt_info_v2 = NULL;  	dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),  			  trans_pcie->prph_scratch, @@ -311,9 +350,9 @@ void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive)  	trans_pcie->prph_info = NULL;  } -static int iwl_pcie_load_payloads_continuously(struct iwl_trans *trans, -					       const struct iwl_pnvm_image *pnvm_data, -					       struct iwl_dram_data *dram) +static int iwl_pcie_load_payloads_contig(struct iwl_trans *trans, +					 const struct iwl_pnvm_image *pnvm_data, +					 struct iwl_dram_data *dram)  {  	u32 len, len0, len1; @@ -352,13 +391,13 @@ static int iwl_pcie_load_payloads_segments  {  	struct iwl_dram_data *cur_payload_dram = &dram_regions->drams[0];  	struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc; -	struct iwl_prph_scrath_mem_desc_addr_array *addresses; +	struct iwl_prph_scratch_mem_desc_addr_array *addresses;  	const void *data;  	u32 len;  	int i;  	/* allocate and init DRAM descriptors array */ -	len = sizeof(struct iwl_prph_scrath_mem_desc_addr_array); +	len = sizeof(struct iwl_prph_scratch_mem_desc_addr_array);  	desc_dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent  						(trans,  						 len, @@ -400,9 +439,9 @@ static int iwl_pcie_load_payloads_segments  } -int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans, -					   const struct iwl_pnvm_image *pnvm_payloads, -					   const struct iwl_ucode_capabilities *capa) +int iwl_trans_pcie_ctx_info_v2_load_pnvm(struct iwl_trans *trans, +					 const struct iwl_pnvm_image *pnvm_payloads, +					 const struct iwl_ucode_capabilities *capa)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl = @@ -417,7 +456,7 @@ int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,  	if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size))  		return -EBUSY; -	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) +	if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)  		return 0;  	if (!pnvm_payloads->n_chunks) { @@ -434,10 +473,8 @@ int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,  			trans->pnvm_loaded = true;  	} else {  		/* save only in one DRAM section */ -		ret = iwl_pcie_load_payloads_continuously -						(trans, -						 pnvm_payloads, -						 &dram_regions->drams[0]); +		ret = iwl_pcie_load_payloads_contig(trans, pnvm_payloads, +						    
&dram_regions->drams[0]);  		if (!ret) {  			dram_regions->n_regions = 1;  			trans->pnvm_loaded = true; @@ -472,7 +509,7 @@ static void iwl_pcie_set_pnvm_segments(struct iwl_trans *trans)  		cpu_to_le32(iwl_dram_regions_size(dram_regions));  } -static void iwl_pcie_set_continuous_pnvm(struct iwl_trans *trans) +static void iwl_pcie_set_contig_pnvm(struct iwl_trans *trans)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl = @@ -484,21 +521,21 @@ static void iwl_pcie_set_continuous_pnvm(struct iwl_trans *trans)  		cpu_to_le32(trans_pcie->pnvm_data.drams[0].size);  } -void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans, -					   const struct iwl_ucode_capabilities *capa) +void iwl_trans_pcie_ctx_info_v2_set_pnvm(struct iwl_trans *trans, +					 const struct iwl_ucode_capabilities *capa)  { -	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) +	if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)  		return;  	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))  		iwl_pcie_set_pnvm_segments(trans);  	else -		iwl_pcie_set_continuous_pnvm(trans); +		iwl_pcie_set_contig_pnvm(trans);  } -int iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans, -						   const struct iwl_pnvm_image *payloads, -						   const struct iwl_ucode_capabilities *capa) +int iwl_trans_pcie_ctx_info_v2_load_reduce_power(struct iwl_trans *trans, +						 const struct iwl_pnvm_image *payloads, +						 const struct iwl_ucode_capabilities *capa)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl = @@ -510,7 +547,7 @@ int iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans,  	if (trans->reduce_power_loaded)  		return 0; -	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) +	if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)  		return 0;  	if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size)) @@ -530,10 +567,8 @@ int iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans,  			trans->reduce_power_loaded = true;  	} else {  		/* save only in one DRAM section */ -		ret = iwl_pcie_load_payloads_continuously -						(trans, -						 payloads, -						 &dram_regions->drams[0]); +		ret = iwl_pcie_load_payloads_contig(trans, payloads, +						    &dram_regions->drams[0]);  		if (!ret) {  			dram_regions->n_regions = 1;  			trans->reduce_power_loaded = true; @@ -556,7 +591,7 @@ static void iwl_pcie_set_reduce_power_segments(struct iwl_trans *trans)  		cpu_to_le32(iwl_dram_regions_size(dram_regions));  } -static void iwl_pcie_set_continuous_reduce_power(struct iwl_trans *trans) +static void iwl_pcie_set_contig_reduce_power(struct iwl_trans *trans)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl = @@ -569,15 +604,15 @@ static void iwl_pcie_set_continuous_reduce_power(struct iwl_trans *trans)  }  void -iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans, -					      const struct iwl_ucode_capabilities *capa) +iwl_trans_pcie_ctx_info_v2_set_reduce_power(struct iwl_trans *trans, +					    const struct iwl_ucode_capabilities *capa)  { -	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) +	if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)  		return;  	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))  		iwl_pcie_set_reduce_power_segments(trans);  	else -		
iwl_pcie_set_continuous_reduce_power(trans); +		iwl_pcie_set_contig_reduce_power(trans);  } diff --git a/sys/contrib/dev/iwlwifi/pcie/ctxt-info.c b/sys/contrib/dev/iwlwifi/pcie/ctxt-info.c index 344e4d5a1c6e..0957223c776d 100644 --- a/sys/contrib/dev/iwlwifi/pcie/ctxt-info.c +++ b/sys/contrib/dev/iwlwifi/pcie/ctxt-info.c @@ -1,12 +1,12 @@  // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause  /*   * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation   */  #include "iwl-trans.h"  #include "iwl-fh.h"  #include "iwl-context-info.h" -#include "internal.h" +#include "gen1_2/internal.h"  #include "iwl-prph.h"  static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans, @@ -83,7 +83,7 @@ void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)  int iwl_pcie_init_fw_sec(struct iwl_trans *trans,  			 const struct fw_img *fw, -			 struct iwl_context_info_dram *ctxt_dram) +			 struct iwl_context_info_dram_nonfseq *ctxt_dram)  {  	struct iwl_self_init_dram *dram = &trans->init_dram;  	int i, ret, lmac_cnt, umac_cnt, paging_cnt; @@ -161,12 +161,12 @@ int iwl_pcie_init_fw_sec(struct iwl_trans *trans,  }  int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, -			    const struct fw_img *fw) +			    const struct fw_img *img)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	struct iwl_context_info *ctxt_info;  	struct iwl_context_info_rbd_cfg *rx_cfg; -	u32 control_flags = 0, rb_size; +	u32 control_flags = 0, rb_size, cb_size;  	dma_addr_t phys;  	int ret; @@ -180,11 +180,11 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,  	ctxt_info->version.version = 0;  	ctxt_info->version.mac_id = -		cpu_to_le16((u16)trans->hw_rev); +		cpu_to_le16((u16)trans->info.hw_rev);  	/* size is in DWs */  	ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4); -	switch (trans_pcie->rx_buf_size) { +	switch (trans->conf.rx_buf_size) {  	case IWL_AMSDU_2K:  		rb_size = IWL_CTXT_INFO_RB_SIZE_2K;  		break; @@ -202,11 +202,12 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,  		rb_size = IWL_CTXT_INFO_RB_SIZE_4K;  	} -	WARN_ON(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds) > 12); +	cb_size = RX_QUEUE_CB_SIZE(iwl_trans_get_num_rbds(trans)); +	if (WARN_ON(cb_size > 12)) +		cb_size = 12; +  	control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG; -	control_flags |= -		u32_encode_bits(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds), -				IWL_CTXT_INFO_RB_CB_SIZE); +	control_flags |= u32_encode_bits(cb_size, IWL_CTXT_INFO_RB_CB_SIZE);  	control_flags |= u32_encode_bits(rb_size, IWL_CTXT_INFO_RB_SIZE);  	ctxt_info->control.control_flags = cpu_to_le32(control_flags); @@ -218,12 +219,12 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,  	/* initialize TX command queue */  	ctxt_info->hcmd_cfg.cmd_queue_addr = -		cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr); +		cpu_to_le64(trans_pcie->txqs.txq[trans->conf.cmd_queue]->dma_addr);  	ctxt_info->hcmd_cfg.cmd_queue_size =  		TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);  	/* allocate ucode sections in dram and set addresses */ -	ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram); +	ret = iwl_pcie_init_fw_sec(trans, img, &ctxt_info->dram);  	if (ret) {  		dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),  				  ctxt_info, trans_pcie->ctxt_info_dma_addr); @@ -232,7 +233,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,  	trans_pcie->ctxt_info = ctxt_info; -	iwl_enable_fw_load_int_ctx_info(trans); +	
iwl_enable_fw_load_int_ctx_info(trans, false);  	/* Configure debug, if exists */  	if (iwl_pcie_dbg_on(trans)) diff --git a/sys/contrib/dev/iwlwifi/pcie/drv.c b/sys/contrib/dev/iwlwifi/pcie/drv.c index 8d28ce1374b8..4deb57058c9d 100644 --- a/sys/contrib/dev/iwlwifi/pcie/drv.c +++ b/sys/contrib/dev/iwlwifi/pcie/drv.c @@ -1,6 +1,6 @@  // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause  /* - * Copyright (C) 2005-2014, 2018-2024 Intel Corporation + * Copyright (C) 2005-2014, 2018-2025 Intel Corporation   * Copyright (C) 2013-2015 Intel Mobile Communications GmbH   * Copyright (C) 2016-2017 Intel Deutschland GmbH   */ @@ -18,17 +18,19 @@  #include "iwl-trans.h"  #include "iwl-drv.h"  #include "iwl-prph.h" -#include "internal.h" +#include "gen1_2/internal.h" + +#if defined(__FreeBSD__) +#include <sys/rman.h> +#endif -#define TRANS_CFG_MARKER BIT(0)  #define _IS_A(cfg, _struct) __builtin_types_compatible_p(typeof(cfg),	\  							 struct _struct)  extern int _invalid_type; -#define _TRANS_CFG_MARKER(cfg)						\ -	(__builtin_choose_expr(_IS_A(cfg, iwl_cfg_trans_params),	\ -			       TRANS_CFG_MARKER,			\ -	 __builtin_choose_expr(_IS_A(cfg, iwl_cfg), 0, _invalid_type))) -#define _ASSIGN_CFG(cfg) (_TRANS_CFG_MARKER(cfg) + (kernel_ulong_t)&(cfg)) +#define _TRANS_CFG_CHECK(cfg)						\ +	(__builtin_choose_expr(_IS_A(cfg, iwl_mac_cfg),	\ +			       0, _invalid_type)) +#define _ASSIGN_CFG(cfg) (_TRANS_CFG_CHECK(cfg) + (kernel_ulong_t)&(cfg))  #define IWL_PCI_DEVICE(dev, subdev, cfg) \  	.vendor = PCI_VENDOR_ID_INTEL,  .device = (dev), \ @@ -38,1268 +40,1062 @@ extern int _invalid_type;  /* Hardware specific file defines the PCI IDs table for that hardware module */  VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {  #if IS_ENABLED(CONFIG_IWLDVM) -	{IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ -	{IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ -	{IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */ -	{IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */ -	{IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */ -	{IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */ -	{IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */ -	{IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */ -	{IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */ -	{IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */ -	{IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */ -	{IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */ -	{IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */ -	{IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */ -	{IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */ -	{IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */ -	{IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */ -	{IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */ -	{IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */ -	{IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */ -	{IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */ -	{IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */ -	{IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */ -	{IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */ +	{IWL_PCI_DEVICE(0x4232, 0x1201, iwl5000_mac_cfg)}, 
/* Mini Card */ +	{IWL_PCI_DEVICE(0x4232, 0x1301, iwl5000_mac_cfg)}, /* Half Mini Card */ +	{IWL_PCI_DEVICE(0x4232, 0x1204, iwl5000_mac_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x4232, 0x1304, iwl5000_mac_cfg)}, /* Half Mini Card */ +	{IWL_PCI_DEVICE(0x4232, 0x1205, iwl5000_mac_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x4232, 0x1305, iwl5000_mac_cfg)}, /* Half Mini Card */ +	{IWL_PCI_DEVICE(0x4232, 0x1206, iwl5000_mac_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x4232, 0x1306, iwl5000_mac_cfg)}, /* Half Mini Card */ +	{IWL_PCI_DEVICE(0x4232, 0x1221, iwl5000_mac_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x4232, 0x1321, iwl5000_mac_cfg)}, /* Half Mini Card */ +	{IWL_PCI_DEVICE(0x4232, 0x1224, iwl5000_mac_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x4232, 0x1324, iwl5000_mac_cfg)}, /* Half Mini Card */ +	{IWL_PCI_DEVICE(0x4232, 0x1225, iwl5000_mac_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x4232, 0x1325, iwl5000_mac_cfg)}, /* Half Mini Card */ +	{IWL_PCI_DEVICE(0x4232, 0x1226, iwl5000_mac_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x4232, 0x1326, iwl5000_mac_cfg)}, /* Half Mini Card */ +	{IWL_PCI_DEVICE(0x4237, 0x1211, iwl5000_mac_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x4237, 0x1311, iwl5000_mac_cfg)}, /* Half Mini Card */ +	{IWL_PCI_DEVICE(0x4237, 0x1214, iwl5000_mac_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x4237, 0x1314, iwl5000_mac_cfg)}, /* Half Mini Card */ +	{IWL_PCI_DEVICE(0x4237, 0x1215, iwl5000_mac_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x4237, 0x1315, iwl5000_mac_cfg)}, /* Half Mini Card */ +	{IWL_PCI_DEVICE(0x4237, 0x1216, iwl5000_mac_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x4237, 0x1316, iwl5000_mac_cfg)}, /* Half Mini Card */  /* 5300 Series WiFi */ -	{IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */ -	{IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */ -	{IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */ -	{IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */ -	{IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */ -	{IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */ -	{IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */ -	{IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */ -	{IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */ -	{IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */ -	{IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */ -	{IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */ +	{IWL_PCI_DEVICE(0x4235, 0x1021, iwl5000_mac_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x4235, 0x1121, iwl5000_mac_cfg)}, /* Half Mini Card */ +	{IWL_PCI_DEVICE(0x4235, 0x1024, iwl5000_mac_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x4235, 0x1124, iwl5000_mac_cfg)}, /* Half Mini Card */ +	{IWL_PCI_DEVICE(0x4235, 0x1001, iwl5000_mac_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x4235, 0x1101, iwl5000_mac_cfg)}, /* Half Mini Card */ +	{IWL_PCI_DEVICE(0x4235, 0x1004, iwl5000_mac_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x4235, 0x1104, iwl5000_mac_cfg)}, /* Half Mini Card */ +	{IWL_PCI_DEVICE(0x4236, 0x1011, iwl5000_mac_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x4236, 0x1111, iwl5000_mac_cfg)}, /* Half Mini Card */ +	{IWL_PCI_DEVICE(0x4236, 0x1014, iwl5000_mac_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x4236, 0x1114, iwl5000_mac_cfg)}, /* Half Mini Card */  /* 5350 Series WiFi/WiMax */ -	{IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */ -	{IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, 
/* Mini Card */ -	{IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x423A, 0x1001, iwl5000_mac_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x423A, 0x1021, iwl5000_mac_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x423B, 0x1011, iwl5000_mac_cfg)}, /* Mini Card */  /* 5150 Series Wifi/WiMax */ -	{IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */ -	{IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */ -	{IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */ -	{IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */ -	{IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */ -	{IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */ -	{IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */ - -	{IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */ -	{IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */ -	{IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */ -	{IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */ +	{IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_mac_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_mac_cfg)}, /* Half Mini Card */ +	{IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_mac_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_mac_cfg)}, /* Half Mini Card */ +	{IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_mac_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_mac_cfg)}, /* Half Mini Card */ +	{IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_mac_cfg)}, /* Half Mini Card */ + +	{IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_mac_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_mac_cfg)}, /* Half Mini Card */ +	{IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_mac_cfg)}, /* Mini Card */ +	{IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_mac_cfg)}, /* Half Mini Card */  /* 6x00 Series */ -	{IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)}, -	{IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)}, -	{IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)}, -	{IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)}, -	{IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)}, -	{IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)}, -	{IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)}, -	{IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)}, -	{IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)}, -	{IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)}, -	{IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)}, -	{IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)}, -	{IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)}, +	{IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_mac_cfg)}, +	{IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_mac_cfg)}, +	{IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_mac_cfg)}, +	{IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_mac_cfg)}, +	{IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_mac_cfg)}, +	{IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_mac_cfg)}, +	{IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_mac_cfg)},  /* 6x05 Series */ -	{IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)}, -	{IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)}, -	{IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)}, -	{IWL_PCI_DEVICE(0x0082, 0x1308, 
iwl6005_2agn_cfg)}, -	{IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)}, -	{IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)}, -	{IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)}, -	{IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)}, -	{IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)}, -	{IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)}, -	{IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)}, -	{IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)}, -	{IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)}, -	{IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)}, -	{IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */ -	{IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */ - -/* 6x30 Series */ -	{IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)}, -	{IWL_PCI_DEVICE(0x008A, 0x5307, iwl1030_bg_cfg)}, -	{IWL_PCI_DEVICE(0x008A, 0x5325, iwl1030_bgn_cfg)}, -	{IWL_PCI_DEVICE(0x008A, 0x5327, iwl1030_bg_cfg)}, -	{IWL_PCI_DEVICE(0x008B, 0x5315, iwl1030_bgn_cfg)}, -	{IWL_PCI_DEVICE(0x008B, 0x5317, iwl1030_bg_cfg)}, -	{IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_2agn_cfg)}, -	{IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_2bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_2abg_cfg)}, -	{IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_2agn_cfg)}, -	{IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_2bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_2abg_cfg)}, -	{IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_2bg_cfg)}, -	{IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_2agn_cfg)}, -	{IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_2bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_2abg_cfg)}, +	{IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_mac_cfg)},/* low 5GHz active */ +	{IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_mac_cfg)},/* high 5GHz active */ + +/* 1030/6x30 Series */ +	{IWL_PCI_DEVICE(0x008A, 0x5305, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x008A, 0x5307, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x008A, 0x5325, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x008A, 0x5327, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x008B, 0x5315, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x008B, 0x5317, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_mac_cfg)},  /* 6x50 WiFi/WiMax Series */ -	{IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)}, -	{IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)}, -	{IWL_PCI_DEVICE(0x0087, 0x1321, 
iwl6050_2agn_cfg)}, -	{IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)}, -	{IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)}, -	{IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)}, +	{IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_mac_cfg)},  /* 6150 WiFi/WiMax Series */ -	{IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)}, -	{IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)}, -	{IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)}, +	{IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_mac_cfg)},  /* 1000 Series WiFi */ -	{IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)}, -	{IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)}, -	{IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)}, -	{IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)}, -	{IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)}, -	{IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)}, +	{IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_mac_cfg)},  /* 100 Series WiFi */ -	{IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)}, -	{IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)}, -	{IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)}, -	{IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)}, -	{IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)}, -	{IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)}, +	{IWL_PCI_DEVICE(0x08AE, 0x1005, iwl1000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08AE, 0x1007, iwl1000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08AF, 0x1015, iwl1000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08AF, 0x1017, iwl1000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08AE, 0x1025, iwl1000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08AE, 0x1027, iwl1000_mac_cfg)},  /* 130 Series WiFi */ -	{IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0896, 0x5007, iwl130_bg_cfg)}, -	{IWL_PCI_DEVICE(0x0897, 0x5015, iwl130_bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0897, 0x5017, iwl130_bg_cfg)}, -	{IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)}, +	{IWL_PCI_DEVICE(0x0896, 0x5005, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0896, 0x5007, iwl6030_mac_cfg)}, +	
{IWL_PCI_DEVICE(0x0897, 0x5015, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0897, 0x5017, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0896, 0x5025, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0896, 0x5027, iwl6030_mac_cfg)},  /* 2x00 Series */ -	{IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)}, +	{IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_mac_cfg)},  /* 2x30 Series */ -	{IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)}, +	{IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_mac_cfg)},  /* 6x35 Series */ -	{IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)}, -	{IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)}, -	{IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)}, -	{IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)}, -	{IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)}, -	{IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)}, -	{IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)}, -	{IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)}, +	{IWL_PCI_DEVICE(0x088E, 0x4060, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x088E, 0x406A, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x088F, 0x4260, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x088F, 0x426A, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x088E, 0x4460, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x088E, 0x446A, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x088E, 0x4860, iwl6030_mac_cfg)}, +	{IWL_PCI_DEVICE(0x088F, 0x5260, iwl6030_mac_cfg)},  /* 105 Series */ -	{IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)}, +	{IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_mac_cfg)},  /* 135 Series */ -	{IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)}, -	{IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)}, +	{IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_mac_cfg)}, +	{IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_mac_cfg)},  #endif /* CONFIG_IWLDVM */  #if IS_ENABLED(CONFIG_IWLMVM)  /* 7260 Series */ -	{IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)}, -	{IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x4470, 
iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)}, -	{IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)}, -	{IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)}, -	{IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x5072, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x5170, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x5770, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)}, -	{IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x4472, 
iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x5072, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x5170, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x5770, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7000_mac_cfg)},  /* 3160 Series */ -	{IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)}, -	{IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)}, -	{IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, -	
{IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B4, 0x8370, iwl3160_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B4, 0x8272, iwl3160_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)}, +	{IWL_PCI_DEVICE(0x08B3, 0x0070, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B3, 0x0072, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B3, 0x0170, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B3, 0x0172, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B3, 0x0060, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B3, 0x0062, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B4, 0x0270, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B4, 0x0272, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B3, 0x0470, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B3, 0x0472, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B4, 0x0370, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B3, 0x8070, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B3, 0x8072, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B3, 0x8170, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B3, 0x8172, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B3, 0x8060, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B3, 0x8062, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B4, 0x8270, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B4, 0x8370, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B4, 0x8272, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B3, 0x8470, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B3, 0x8570, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B3, 0x1070, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x08B3, 0x1170, iwl7000_mac_cfg)},  /* 3165 Series */ -	{IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)}, +	{IWL_PCI_DEVICE(0x3165, 0x4010, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x3165, 0x4012, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x3166, 0x4212, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x3165, 0x4410, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x3165, 0x4510, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x3165, 0x4110, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x3166, 0x4310, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x3166, 0x4210, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x3165, 0x8010, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x3165, 0x8110, iwl7000_mac_cfg)},  /* 3168 Series */ -	{IWL_PCI_DEVICE(0x24FB, 0x2010, iwl3168_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x24FB, 0x2110, iwl3168_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x24FB, 0x2050, iwl3168_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x24FB, 0x2150, iwl3168_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x24FB, 0x0000, iwl3168_2ac_cfg)}, +	{IWL_PCI_DEVICE(0x24FB, 0x2010, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x24FB, 0x2110, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x24FB, 0x2050, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x24FB, 0x2150, iwl7000_mac_cfg)}, +	{IWL_PCI_DEVICE(0x24FB, 0x0000, iwl7000_mac_cfg)},  /* 7265 Series */ -	{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, -	{IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)}, -	
{IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)},
-	{IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x5102, iwl7265_n_cfg)},
-	{IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095B, 0x5212, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x9E10, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5110, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5100, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x5310, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x5302, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x5210, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5012, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5412, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5410, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5510, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5400, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x1010, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5000, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x500A, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x5200, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5002, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5102, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x5202, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9010, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9012, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x900A, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9110, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9112, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x9210, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x9200, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9510, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x9310, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9410, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5020, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x502A, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5420, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5090, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5190, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5590, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x5290, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5490, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x5212, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x520A, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9000, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9400, iwl7000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9E10, iwl7000_mac_cfg)},
 /* 8000 Series */
-	{IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x10B0, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x1132, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0xD0B0, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0xB0B0, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8260_2n_cfg)},
-	{IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F3, 0x4010, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x1130, iwl8265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x0130, iwl8265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x10D0, iwl8265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x0910, iwl8265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x1014, iwl8265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x3E02, iwl8275_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8275_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8275_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x0014, iwl8265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x10B0, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x1132, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0xD0B0, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0xB0B0, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F5, 0x0010, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F6, 0x0030, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x4010, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0xC030, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0xD030, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x1130, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0130, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x10D0, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0910, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x1014, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x3E02, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0014, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x1431, iwl8000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x1432, iwl8000_mac_cfg)},
 /* 9000 Series */
-	{IWL_PCI_DEVICE(0x2526, PCI_ANY_ID, iwl9000_trans_cfg)},
-	{IWL_PCI_DEVICE(0x271B, PCI_ANY_ID, iwl9000_trans_cfg)},
-	{IWL_PCI_DEVICE(0x271C, PCI_ANY_ID, iwl9000_trans_cfg)},
-	{IWL_PCI_DEVICE(0x30DC, PCI_ANY_ID, iwl9560_long_latency_trans_cfg)},
-	{IWL_PCI_DEVICE(0x31DC, PCI_ANY_ID, iwl9560_shared_clk_trans_cfg)},
-	{IWL_PCI_DEVICE(0x9DF0, PCI_ANY_ID, iwl9560_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA370, PCI_ANY_ID, iwl9560_trans_cfg)},
+	{IWL_PCI_DEVICE(0x2526, PCI_ANY_ID, iwl9000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x271B, PCI_ANY_ID, iwl9000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x271C, PCI_ANY_ID, iwl9000_mac_cfg)},
+	{IWL_PCI_DEVICE(0x30DC, PCI_ANY_ID, iwl9560_long_latency_mac_cfg)},
+	{IWL_PCI_DEVICE(0x31DC, PCI_ANY_ID, iwl9560_shared_clk_mac_cfg)},
+	{IWL_PCI_DEVICE(0x9DF0, PCI_ANY_ID, iwl9560_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA370, PCI_ANY_ID, iwl9560_mac_cfg)},
 /* Qu devices */
-	{IWL_PCI_DEVICE(0x02F0, PCI_ANY_ID, iwl_qu_trans_cfg)},
-	{IWL_PCI_DEVICE(0x06F0, PCI_ANY_ID, iwl_qu_trans_cfg)},
+	{IWL_PCI_DEVICE(0x02F0, PCI_ANY_ID, iwl_qu_mac_cfg)},
+	{IWL_PCI_DEVICE(0x06F0, PCI_ANY_ID, iwl_qu_mac_cfg)},
-	{IWL_PCI_DEVICE(0x34F0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)},
-	{IWL_PCI_DEVICE(0x3DF0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)},
-	{IWL_PCI_DEVICE(0x4DF0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)},
+	{IWL_PCI_DEVICE(0x34F0, PCI_ANY_ID, iwl_qu_medium_latency_mac_cfg)},
+	{IWL_PCI_DEVICE(0x3DF0, PCI_ANY_ID, iwl_qu_medium_latency_mac_cfg)},
+	{IWL_PCI_DEVICE(0x4DF0, PCI_ANY_ID, iwl_qu_medium_latency_mac_cfg)},
-	{IWL_PCI_DEVICE(0x43F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA0F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)},
+	{IWL_PCI_DEVICE(0x43F0, PCI_ANY_ID, iwl_qu_long_latency_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA0F0, PCI_ANY_ID, iwl_qu_long_latency_mac_cfg)},
-	{IWL_PCI_DEVICE(0x2723, PCI_ANY_ID, iwl_ax200_trans_cfg)},
+	{IWL_PCI_DEVICE(0x2723, PCI_ANY_ID, iwl_ax200_mac_cfg)},
-/* So devices */
-	{IWL_PCI_DEVICE(0x2725, PCI_ANY_ID, iwl_so_trans_cfg)},
-	{IWL_PCI_DEVICE(0x7A70, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)},
-	{IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_trans_cfg)},
-	{IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
-	{IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)},
-	{IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
-	{IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_trans_cfg)},
+/* Ty/So devices */
+	{IWL_PCI_DEVICE(0x2725, PCI_ANY_ID, iwl_ty_mac_cfg)},
+	{IWL_PCI_DEVICE(0x7A70, PCI_ANY_ID, iwl_so_long_latency_imr_mac_cfg)},
+	{IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_mac_cfg)},
+	{IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_mac_cfg)},
+	{IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_imr_mac_cfg)},
+	{IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_mac_cfg)},
+	{IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_mac_cfg)},
 /* Ma devices */
-	{IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)},
-	{IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)},
-
+	{IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_mac_cfg)},
+	{IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_mac_cfg)},
+#endif /* CONFIG_IWLMVM */
+#if IS_ENABLED(CONFIG_IWLMVM) || IS_ENABLED(CONFIG_IWLMLD)
 /* Bz devices */
-	{IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0x272D, PCI_ANY_ID, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x0090, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x0094, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x0098, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x009C, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x00C0, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x00C4, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x00E0, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x00E4, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x00E8, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x00EC, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x0100, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x0110, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x0114, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x0118, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x011C, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x0310, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x0314, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x0510, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x0A10, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x1671, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x1672, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x1771, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x1772, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x1791, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x1792, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x4090, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x40C4, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x40E0, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x4110, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0xA840, 0x4314, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
-	{IWL_PCI_DEVICE(0x4D40, PCI_ANY_ID, iwl_bz_trans_cfg)},
+	{IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_gl_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x0090, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x0094, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x0098, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x009C, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x00C0, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x00C4, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x00E0, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x00E4, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x00E8, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x00EC, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x0100, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x0110, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x0114, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x0118, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x011C, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x0310, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x0314, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x0510, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x0A10, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x1671, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x1672, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x1771, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x1772, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x1791, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x1792, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x4090, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x40C4, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x40E0, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x4110, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x4314, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x1775, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0xA840, 0x1776, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_mac_cfg)},
+	{IWL_PCI_DEVICE(0x4D40, PCI_ANY_ID, iwl_bz_mac_cfg)},
 /* Sc devices */
-	{IWL_PCI_DEVICE(0xE440, PCI_ANY_ID, iwl_sc_trans_cfg)},
-	{IWL_PCI_DEVICE(0xE340, PCI_ANY_ID, iwl_sc_trans_cfg)},
-	{IWL_PCI_DEVICE(0xD340, PCI_ANY_ID, iwl_sc_trans_cfg)},
-	{IWL_PCI_DEVICE(0x6E70, PCI_ANY_ID, iwl_sc_trans_cfg)},
-#endif /* CONFIG_IWLMVM */
+	{IWL_PCI_DEVICE(0xE440, PCI_ANY_ID, iwl_sc_mac_cfg)},
+	{IWL_PCI_DEVICE(0xE340, PCI_ANY_ID, iwl_sc_mac_cfg)},
+	{IWL_PCI_DEVICE(0xD340, PCI_ANY_ID, iwl_sc_mac_cfg)},
+	{IWL_PCI_DEVICE(0x6E70, PCI_ANY_ID, iwl_sc_mac_cfg)},
+	{IWL_PCI_DEVICE(0xD240, PCI_ANY_ID, iwl_sc_mac_cfg)},
+#endif /* CONFIG_IWLMVM || CONFIG_IWLMLD */
 	{0}
 };
 MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
 EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_hw_card_ids);
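
[Editor's note — not part of the diff] The hunks above only swap the config symbol carried by each IWL_PCI_DEVICE() entry (the old per-device *_cfg / *_trans_cfg symbols become per-family *_mac_cfg ones); the table keeps the usual struct pci_device_id shape, with the {0} entry as the sentinel that stops the PCI core's scan. A minimal sketch of how such a table is consumed, assuming the conventional kernel API (the probe/remove wiring shown is illustrative, not copied from this diff):

	#include <linux/module.h>
	#include <linux/pci.h>

	/* Sketch only: the PCI core matches iwl_hw_card_ids at probe time and
	 * hands the matched entry's driver_data -- the config pointer stored
	 * by IWL_PCI_DEVICE() -- to the probe callback. */
	static struct pci_driver iwl_pci_driver_sketch = {
		.name		= "iwlwifi",
		.id_table	= iwl_hw_card_ids,
		.probe		= iwl_pci_probe,	/* hypothetical wiring */
		.remove		= iwl_pci_remove,	/* hypothetical wiring */
	};
	module_pci_driver(iwl_pci_driver_sketch);
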
-#define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
-		      _rf_id, _rf_step, _no_160, _cores, _cdb, _cfg, _name) \
-	{ .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
-	  .name = _name, .mac_type = _mac_type, .rf_type = _rf_type, .rf_step = _rf_step, \
-	  .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \
-	  .mac_step = _mac_step, .cdb = _cdb, .jacket = IWL_CFG_ANY }
-
-#define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \
-	_IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY,   \
-		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,  \
-		      IWL_CFG_ANY, _cfg, _name)
+#define _IWL_DEV_INFO(_cfg, _name, ...) {	\
+	.cfg = &_cfg,				\
+	.name = _name,				\
+	.device = IWL_CFG_ANY,			\
+	.subdevice = IWL_CFG_ANY,		\
+	.subdevice_m_h = 15,			\
+	__VA_ARGS__				\
+}
+#define IWL_DEV_INFO(_cfg, _name, ...)		\
+	_IWL_DEV_INFO(_cfg, _name, __VA_ARGS__)
+
+#define DEVICE(n)		.device = (n)
+#define SUBDEV(n)		.subdevice = (n)
+#define _LOWEST_BIT(n)		(__builtin_ffs(n) - 1)
+#define _BIT_ABOVE_MASK(n)	((n) + (1 << _LOWEST_BIT(n)))
+#define _HIGHEST_BIT(n)		(__builtin_ffs(_BIT_ABOVE_MASK(n)) - 2)
+#define _IS_POW2(n)		(((n) & ((n) - 1)) == 0)
+#define _IS_CONTIG(n)		_IS_POW2(_BIT_ABOVE_MASK(n))
+#define _CHECK_MASK(m)		BUILD_BUG_ON_ZERO(!_IS_CONTIG(m))
+#define SUBDEV_MASKED(v, m)	.subdevice = (v) + _CHECK_MASK(m),	\
+				.subdevice_m_l = _LOWEST_BIT(m),	\
+				.subdevice_m_h = _HIGHEST_BIT(m)
+#define RF_TYPE(n)		.match_rf_type = 1,			\
+				.rf_type = IWL_CFG_RF_TYPE_##n
+#define DISCRETE		.match_discrete = 1,			\
+				.discrete = 1
+#define INTEGRATED		.match_discrete = 1,			\
+				.discrete = 0
+#define RF_ID(n)		.match_rf_id = 1,			\
+				.rf_id = IWL_CFG_RF_ID_##n
+#define NO_CDB			.match_cdb = 1, .cdb = 0
+#define CDB			.match_cdb = 1, .cdb = 1
+#define BW_NOT_LIMITED		.match_bw_limit = 1, .bw_limit = 0
+#define BW_LIMITED		.match_bw_limit = 1, .bw_limit = 1
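
[Editor's note — not part of the diff] The new SUBDEV_MASKED(v, m) helper replaces exact-subdevice matching with a value plus a contiguous bit range. The bit tricks are compact: _LOWEST_BIT(0xF) is 0; _BIT_ABOVE_MASK(0xF) is 0xF + 1 = 0x10, i.e. adding the lowest set bit to a contiguous mask carries into the single bit just above it, so _HIGHEST_BIT(0xF) = ffs(0x10) - 2 = 3; and _CHECK_MASK() turns a non-contiguous mask such as 0xF0F into a build failure via BUILD_BUG_ON_ZERO(). Note the default .subdevice_m_h = 15 in _IWL_DEV_INFO(), so a plain SUBDEV(n) still compares all 16 bits. A sketch of the matching arithmetic these fields presumably feed (function name ours; GENMASK() is the standard kernel macro):

	#include <linux/bits.h>
	#include <linux/types.h>

	/* Compare only bits m_l..m_h of the PCI subsystem ID against the
	 * stored .subdevice value.  SUBDEV_MASKED(0x5, 0xF) stores m_l = 0,
	 * m_h = 3 and matches any subdevice whose low nibble is 5;
	 * SUBDEV_MASKED(0xC000, 0xF000) stores m_l = 12, m_h = 15 and
	 * matches a 0xC top nibble. */
	static bool subdev_matches(u16 subsystem_id, u16 subdevice, u8 m_l, u8 m_h)
	{
		u16 mask = GENMASK(m_h, m_l);	/* contiguous, by _CHECK_MASK() */

		return (subsystem_id & mask) == subdevice;
	}

This is also what lets a bare DEVICE() catch-all coexist with more specific SUBDEV()/SUBDEV_MASKED() entries in the table that follows; the 7260 block later notes that unlisted subdevices "fall through" to the catch-all.
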
 
 VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
-#if IS_ENABLED(CONFIG_IWLMVM)
-/* 9000 */
-	IWL_DEV_INFO(0x2526, 0x1550, iwl9260_2ac_cfg, iwl9260_killer_1550_name),
-	IWL_DEV_INFO(0x2526, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
-	IWL_DEV_INFO(0x2526, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
-	IWL_DEV_INFO(0x30DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
-	IWL_DEV_INFO(0x30DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
-	IWL_DEV_INFO(0x31DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
-	IWL_DEV_INFO(0x31DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
-	IWL_DEV_INFO(0xA370, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
-	IWL_DEV_INFO(0xA370, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
-	IWL_DEV_INFO(0x54F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name),
-	IWL_DEV_INFO(0x54F0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
-	IWL_DEV_INFO(0x51F0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name),
-	IWL_DEV_INFO(0x51F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name),
-	IWL_DEV_INFO(0x51F0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
-	IWL_DEV_INFO(0x51F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
-	IWL_DEV_INFO(0x51F1, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
-	IWL_DEV_INFO(0x54F0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
-	IWL_DEV_INFO(0x54F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
-	IWL_DEV_INFO(0x7A70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
-	IWL_DEV_INFO(0x7A70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
-	IWL_DEV_INFO(0x7AF0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
-	IWL_DEV_INFO(0x7AF0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
-
-	IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name),
-	IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma, iwl_ax411_killer_1690s_name),
-	IWL_DEV_INFO(0x7E40, 0x1692, iwl_cfg_ma, iwl_ax411_killer_1690i_name),
-
-/* AX200 */
-	IWL_DEV_INFO(0x2723, IWL_CFG_ANY, iwl_ax200_cfg_cc, iwl_ax200_name),
-	IWL_DEV_INFO(0x2723, 0x1653, iwl_ax200_cfg_cc, iwl_ax200_killer_1650w_name),
-	IWL_DEV_INFO(0x2723, 0x1654, iwl_ax200_cfg_cc, iwl_ax200_killer_1650x_name),
-
-	/* Qu with Hr */
-	IWL_DEV_INFO(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650s_name),
-	IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650i_name),
-	IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0xA0F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0xA0F0, 0x0A10, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
-	IWL_DEV_INFO(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
-	IWL_DEV_INFO(0xA0F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0xA0F0, 0x6074, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL),
-	IWL_DEV_INFO(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL),
-	IWL_DEV_INFO(0x02F0, 0x6074, iwl_ax201_cfg_quz_hr, NULL),
-	IWL_DEV_INFO(0x02F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL),
-	IWL_DEV_INFO(0x02F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL),
-	IWL_DEV_INFO(0x02F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL),
-	IWL_DEV_INFO(0x02F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL),
-	IWL_DEV_INFO(0x02F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL),
-	IWL_DEV_INFO(0x02F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL),
-	IWL_DEV_INFO(0x02F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL),
-	IWL_DEV_INFO(0x06F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL),
-	IWL_DEV_INFO(0x06F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL),
-	IWL_DEV_INFO(0x06F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL),
-	IWL_DEV_INFO(0x06F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL),
-	IWL_DEV_INFO(0x06F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL),
-	IWL_DEV_INFO(0x06F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL),
-	IWL_DEV_INFO(0x06F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL),
-	IWL_DEV_INFO(0x06F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL),
-	IWL_DEV_INFO(0x06F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL),
-	IWL_DEV_INFO(0x34F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x34F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x34F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x34F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x34F0, 0x0310, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
-	IWL_DEV_INFO(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
-	IWL_DEV_INFO(0x34F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x34F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
-
-	IWL_DEV_INFO(0x3DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x3DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x3DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x3DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x3DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x3DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
-	IWL_DEV_INFO(0x3DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
-	IWL_DEV_INFO(0x3DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x3DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
-
-	IWL_DEV_INFO(0x4DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x4DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x4DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x4DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x4DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x4DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
-	IWL_DEV_INFO(0x4DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
-	IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
-	IWL_DEV_INFO(0x4DF0, 0x6074, iwl_ax201_cfg_qu_hr, NULL),
-
-	/* So with HR */
-	IWL_DEV_INFO(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0, NULL),
-	IWL_DEV_INFO(0x2725, 0x0020, iwlax210_2ax_cfg_ty_gf_a0, NULL),
-	IWL_DEV_INFO(0x2725, 0x2020, iwlax210_2ax_cfg_ty_gf_a0, NULL),
-	IWL_DEV_INFO(0x2725, 0x0024, iwlax210_2ax_cfg_ty_gf_a0, NULL),
-	IWL_DEV_INFO(0x2725, 0x0310, iwlax210_2ax_cfg_ty_gf_a0, NULL),
-	IWL_DEV_INFO(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0, NULL),
-	IWL_DEV_INFO(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0, NULL),
-	IWL_DEV_INFO(0x2725, 0xE020, iwlax210_2ax_cfg_ty_gf_a0, NULL),
-	IWL_DEV_INFO(0x2725, 0xE024, iwlax210_2ax_cfg_ty_gf_a0, NULL),
-	IWL_DEV_INFO(0x2725, 0x4020, iwlax210_2ax_cfg_ty_gf_a0, NULL),
-	IWL_DEV_INFO(0x2725, 0x6020, iwlax210_2ax_cfg_ty_gf_a0, NULL),
-	IWL_DEV_INFO(0x2725, 0x6024, iwlax210_2ax_cfg_ty_gf_a0, NULL),
-	IWL_DEV_INFO(0x2725, 0x1673, iwlax210_2ax_cfg_ty_gf_a0, iwl_ax210_killer_1675w_name),
-	IWL_DEV_INFO(0x2725, 0x1674, iwlax210_2ax_cfg_ty_gf_a0, iwl_ax210_killer_1675x_name),
-	IWL_DEV_INFO(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
-	IWL_DEV_INFO(0x7A70, 0x0098, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
-	IWL_DEV_INFO(0x7A70, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0_long, NULL),
-	IWL_DEV_INFO(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
-	IWL_DEV_INFO(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
-	IWL_DEV_INFO(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
-	IWL_DEV_INFO(0x7AF0, 0x0090, iwlax211_2ax_cfg_so_gf_a0, NULL),
-	IWL_DEV_INFO(0x7AF0, 0x0098, iwlax211_2ax_cfg_so_gf_a0, NULL),
-	IWL_DEV_INFO(0x7AF0, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0, NULL),
-	IWL_DEV_INFO(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0, NULL),
-	IWL_DEV_INFO(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0, NULL),
-	IWL_DEV_INFO(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0, NULL),
-
-	/* So with JF */
-	IWL_DEV_INFO(0x7A70, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name),
-	IWL_DEV_INFO(0x7A70, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name),
-	IWL_DEV_INFO(0x7AF0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name),
-	IWL_DEV_INFO(0x7AF0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name),
-
-	/* SO with GF2 */
-	IWL_DEV_INFO(0x2726, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
-	IWL_DEV_INFO(0x2726, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
-	IWL_DEV_INFO(0x51F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
-	IWL_DEV_INFO(0x51F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
-	IWL_DEV_INFO(0x51F1, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
-	IWL_DEV_INFO(0x51F1, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
-	IWL_DEV_INFO(0x54F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
-	IWL_DEV_INFO(0x54F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
-	IWL_DEV_INFO(0x7A70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
-	IWL_DEV_INFO(0x7A70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
-	IWL_DEV_INFO(0x7AF0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
-	IWL_DEV_INFO(0x7AF0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
-	IWL_DEV_INFO(0x7F70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
-	IWL_DEV_INFO(0x7F70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
-
-	/* MA with GF2 */
-	IWL_DEV_INFO(0x7E40, 0x1671, iwl_cfg_ma, iwl_ax211_killer_1675s_name),
-	IWL_DEV_INFO(0x7E40, 0x1672, iwl_cfg_ma, iwl_ax211_killer_1675i_name),
-
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_2ac_cfg_soc, iwl9461_160_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_2ac_cfg_soc, iwl9461_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_2ac_cfg_soc, iwl9462_160_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_2ac_cfg_soc, iwl9462_name),
-
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_2ac_cfg_soc, iwl9560_160_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_2ac_cfg_soc, iwl9560_name),
-
-	_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
-		      iwl9260_2ac_cfg, iwl9270_160_name),
-	_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
-		      iwl9260_2ac_cfg, iwl9270_name),
-
-	_IWL_DEV_INFO(0x271B, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9260_2ac_cfg, iwl9162_160_name),
-	_IWL_DEV_INFO(0x271B, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9260_2ac_cfg, iwl9162_name),
-
-	_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9260_2ac_cfg, iwl9260_160_name),
-	_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9260_2ac_cfg, iwl9260_name),
-
-/* Qu with Jf */
-	/* Qu B step */
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
-		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_qu_b0_jf_b0_cfg, iwl9461_160_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
-		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_qu_b0_jf_b0_cfg, iwl9461_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
-		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_qu_b0_jf_b0_cfg, iwl9462_160_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
-		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_qu_b0_jf_b0_cfg, iwl9462_name),
-
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
-		      IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_qu_b0_jf_b0_cfg, iwl9560_160_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
-		      IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_qu_b0_jf_b0_cfg, iwl9560_name),
-
-	_IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
-		      IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
-		      IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550s_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
-		      IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
-		      IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550i_name),
-
-	/* Qu C step */
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
-		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_qu_c0_jf_b0_cfg, iwl9461_160_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
-		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_qu_c0_jf_b0_cfg, iwl9461_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
-		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_qu_c0_jf_b0_cfg, iwl9462_160_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
-		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_qu_c0_jf_b0_cfg, iwl9462_name),
-
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
-		      IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_qu_c0_jf_b0_cfg, iwl9560_160_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
-		      IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_qu_c0_jf_b0_cfg, iwl9560_name),
-
-	_IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
-		      IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
-		      IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550s_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
-		      IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
-		      IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550i_name),
-
-	/* QuZ */
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_quz_a0_jf_b0_cfg, iwl9461_160_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_quz_a0_jf_b0_cfg, iwl9461_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_quz_a0_jf_b0_cfg, iwl9462_160_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_quz_a0_jf_b0_cfg, iwl9462_name),
-
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_quz_a0_jf_b0_cfg, iwl9560_160_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_quz_a0_jf_b0_cfg, iwl9560_name),
-
-	_IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
-		      IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550s_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
-		      IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550i_name),
-
-/* Qu with Hr */
-	/* Qu B step */
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
-		      IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-		      iwl_qu_b0_hr1_b0, iwl_ax101_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
-		      IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-		      iwl_qu_b0_hr_b0, iwl_ax203_name),
-
-	/* Qu C step */
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
-		      IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-		      iwl_qu_c0_hr1_b0, iwl_ax101_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
-		      IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-		      iwl_qu_c0_hr_b0, iwl_ax203_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
-		      IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-		      iwl_qu_c0_hr_b0, iwl_ax201_name),
-
-	/* QuZ */
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-		      iwl_quz_a0_hr1_b0, iwl_ax101_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP,
-		      IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-		      iwl_cfg_quz_a0_hr_b0, iwl_ax203_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP,
-		      IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-		      iwl_cfg_quz_a0_hr_b0, iwl_ax201_name),
-
-/* Ma */
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-		      iwl_cfg_ma, iwl_ax201_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
-		      iwl_cfg_ma, iwl_ax211_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-		      iwl_cfg_ma, iwl_ax231_name),
-
-/* So with Hr */
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-		      iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-		      iwl_cfg_so_a0_hr_a0, iwl_ax101_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-		      iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
-
-/* So-F with Hr */
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-		      iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-		      iwl_cfg_so_a0_hr_a0, iwl_ax101_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-		      iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
-
-/* So-F with Gf */
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-		      iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB,
-		      iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name),
-
-/* SoF with JF2 */
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
-
-/* SoF with JF */
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
-
-/* So with GF */
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-		      iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB,
-		      iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name),
-
-/* So with JF2 */
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
-
-/* So with JF */
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
-		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
-		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
-		      iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
-
-/* Bz */
-/* FIXME: need to change the naming according to the actual CRF */
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
-		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
-		      iwl_cfg_bz, iwl_fm_name),
-
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
-		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
-		      iwl_cfg_bz, iwl_fm_name),
-
-/* Ga (Gl) */
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-		      iwl_cfg_gl, iwl_gl_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
-		      IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_NO_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-		      iwl_cfg_gl, iwl_mtp_name),
-
-/* Sc */
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY,
-		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
-		      iwl_cfg_sc, iwl_sc_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
-		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
-		      iwl_cfg_sc2, iwl_sc2_name),
-	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
-		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
-		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
-		      iwl_cfg_sc2f, iwl_sc2f_name),
-#endif /* CONFIG_IWLMVM */
-};
-EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table);
+#if IS_ENABLED(CONFIG_IWLDVM)
+	IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_agn_name,
+		     DEVICE(0x4232), SUBDEV_MASKED(0x1, 0xF)),
+	IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_agn_name,
+		     DEVICE(0x4232), SUBDEV_MASKED(0x4, 0xF)),
+	IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_bgn_name,
+		     DEVICE(0x4232), SUBDEV_MASKED(0x5, 0xF)),
+	IWL_DEV_INFO(iwl5100_abg_cfg, iwl5100_abg_name,
+		     DEVICE(0x4232), SUBDEV_MASKED(0x6, 0xF)),
+	IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_agn_name,
+		     DEVICE(0x4237), SUBDEV_MASKED(0x1, 0xF)),
+	IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_agn_name,
+		     DEVICE(0x4237), SUBDEV_MASKED(0x4, 0xF)),
+	IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_bgn_name,
+		     DEVICE(0x4237), SUBDEV_MASKED(0x5, 0xF)),
+	IWL_DEV_INFO(iwl5100_abg_cfg, iwl5100_abg_name,
+		     DEVICE(0x4237), SUBDEV_MASKED(0x6, 0xF)),
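
[Editor's note — not part of the diff] The masked DVM entries lean on a pattern visible in the table itself: with SUBDEV_MASKED(v, 0xF) only the low nibble of the subsystem ID is compared, and judging by the cfg/name pairs, nibble 0x1 and 0x4 select the a/g/n ("agn") variant, 0x5 bgn, 0x6 abg, and 0x7 bg. One masked line therefore stands in for a whole run of exact-ID entries — e.g. DEVICE(0x4232), SUBDEV_MASKED(0x1, 0xF) covers every 0x4232 subsystem ID whose last hex digit is 1.
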
-
-#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
-const unsigned int iwl_dev_info_table_size = ARRAY_SIZE(iwl_dev_info_table);
-EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table_size);
-#endif
+/* 5300 Series WiFi */
+	IWL_DEV_INFO(iwl5300_agn_cfg, iwl5300_agn_name,
+		     DEVICE(0x4235), SUBDEV_MASKED(0x1, 0xF)),
+	IWL_DEV_INFO(iwl5300_agn_cfg, iwl5300_agn_name,
+		     DEVICE(0x4235), SUBDEV_MASKED(0x4, 0xF)),
+	IWL_DEV_INFO(iwl5300_agn_cfg, iwl5300_agn_name,
+		     DEVICE(0x4236), SUBDEV_MASKED(0x1, 0xF)),
+	IWL_DEV_INFO(iwl5300_agn_cfg, iwl5300_agn_name,
+		     DEVICE(0x4236), SUBDEV_MASKED(0x4, 0xF)),
-
-/*
- * Read rf id and cdb info from prph register and store it
- */
-static void get_crf_id(struct iwl_trans *iwl_trans)
-{
-	u32 sd_reg_ver_addr;
-	u32 val = 0;
-	u8 step;
-
-	if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
-		sd_reg_ver_addr = SD_REG_VER_GEN2;
-	else
-		sd_reg_ver_addr = SD_REG_VER;
-
-	/* Enable access to peripheral registers */
-	val = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG);
-	val |= WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK;
-	iwl_write_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG, val);
-
-	/* Read crf info */
-	iwl_trans->hw_crf_id = iwl_read_prph_no_grab(iwl_trans, sd_reg_ver_addr);
-
-	/* Read cnv info */
-	iwl_trans->hw_cnv_id =
-		iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP);
-
-	/* For BZ-W, take B step also when A step is indicated */
-	if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W)
-		step = SILICON_B_STEP;
-
-	/* In BZ, the MAC step must be read from the CNVI aux register */
-	if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ) {
-		step = CNVI_AUX_MISC_CHIP_MAC_STEP(iwl_trans->hw_cnv_id);
-
-		/* For BZ-U, take B step also when A step is indicated */
-		if ((CNVI_AUX_MISC_CHIP_PROD_TYPE(iwl_trans->hw_cnv_id) ==
-		    CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U) &&
-		    step == SILICON_A_STEP)
-			step = SILICON_B_STEP;
-	}
+/* 5350 Series WiFi/WiMax */
+	IWL_DEV_INFO(iwl5350_agn_cfg, iwl5350_agn_name,
+		     DEVICE(0x423A)),
+	IWL_DEV_INFO(iwl5350_agn_cfg, iwl5350_agn_name,
+		     DEVICE(0x423B)),
-
-	if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ ||
-	    CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W) {
-		iwl_trans->hw_rev_step = step;
-		iwl_trans->hw_rev |= step;
-	}
+/* 5150 Series Wifi/WiMax */
+	IWL_DEV_INFO(iwl5150_agn_cfg, iwl5150_agn_name,
+		     DEVICE(0x423C), SUBDEV_MASKED(0x1, 0xF)),
+	IWL_DEV_INFO(iwl5150_abg_cfg, iwl5150_abg_name,
+		     DEVICE(0x423C), SUBDEV_MASKED(0x6, 0xF)),
-
-	/* Read cdb info (also contains the jacket info if needed in the future */
-	iwl_trans->hw_wfpm_id =
-		iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR);
-	IWL_INFO(iwl_trans, "Detected crf-id 0x%x, cnv-id 0x%x wfpm id 0x%x\n",
-		 iwl_trans->hw_crf_id, iwl_trans->hw_cnv_id,
-		 iwl_trans->hw_wfpm_id);
-}
+	IWL_DEV_INFO(iwl5150_agn_cfg, iwl5150_agn_name,
+		     DEVICE(0x423D), SUBDEV_MASKED(0x1, 0xF)),
+	IWL_DEV_INFO(iwl5150_abg_cfg, iwl5150_abg_name,
+		     DEVICE(0x423D), SUBDEV_MASKED(0x6, 0xF)),
-
-/*
- * In case that there is no OTP on the NIC, map the rf id and cdb info
- * from the prph registers.
- */
-static int map_crf_id(struct iwl_trans *iwl_trans)
-{
-	int ret = 0;
-	u32 val = iwl_trans->hw_crf_id;
-	u32 step_id = REG_CRF_ID_STEP(val);
-	u32 slave_id = REG_CRF_ID_SLAVE(val);
-	u32 jacket_id_cnv  = REG_CRF_ID_SLAVE(iwl_trans->hw_cnv_id);
-	u32 jacket_id_wfpm  = WFPM_OTP_CFG1_IS_JACKET(iwl_trans->hw_wfpm_id);
-	u32 cdb_id_wfpm  = WFPM_OTP_CFG1_IS_CDB(iwl_trans->hw_wfpm_id);
-
-	/* Map between crf id to rf id */
-	switch (REG_CRF_ID_TYPE(val)) {
-	case REG_CRF_ID_TYPE_JF_1:
-		iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_JF1 << 12);
-		break;
-	case REG_CRF_ID_TYPE_JF_2:
-		iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_JF2 << 12);
-		break;
-	case REG_CRF_ID_TYPE_HR_NONE_CDB_1X1:
-		iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR1 << 12);
-		break;
-	case REG_CRF_ID_TYPE_HR_NONE_CDB:
-		iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12);
-		break;
-	case REG_CRF_ID_TYPE_HR_CDB:
-		iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12);
-		break;
-	case REG_CRF_ID_TYPE_GF:
-		iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_GF << 12);
-		break;
-	case REG_CRF_ID_TYPE_FM:
-		iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12);
-		break;
-	case REG_CRF_ID_TYPE_WHP:
-		iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_WH << 12);
-		break;
-	default:
-		ret = -EIO;
-		IWL_ERR(iwl_trans,
-			"Can't find a correct rfid for crf id 0x%x\n",
-			REG_CRF_ID_TYPE(val));
-		goto out;
+/* 6x00 Series */
+	IWL_DEV_INFO(iwl6000_3agn_cfg, iwl6000_3agn_name,
+		     DEVICE(0x422B), SUBDEV_MASKED(0x1, 0xF)),
+	IWL_DEV_INFO(iwl6000_3agn_cfg, iwl6000_3agn_name,
+		     DEVICE(0x422B), SUBDEV_MASKED(0x8, 0xF)),
+	IWL_DEV_INFO(iwl6000i_2agn_cfg, iwl6000i_2agn_name,
+		     DEVICE(0x422C), SUBDEV_MASKED(0x1, 0xF)),
+	IWL_DEV_INFO(iwl6000i_non_n_cfg, iwl6000i_2abg_name,
+		     DEVICE(0x422C), SUBDEV_MASKED(0x6, 0xF)),
+	IWL_DEV_INFO(iwl6000i_non_n_cfg, iwl6000i_2bg_name,
+		     DEVICE(0x422C), SUBDEV_MASKED(0x7, 0xF)),
+	IWL_DEV_INFO(iwl6000_3agn_cfg, iwl6000_3agn_name,
+		     DEVICE(0x4238), SUBDEV(0x1111)),
+	IWL_DEV_INFO(iwl6000_3agn_cfg, iwl6000_3agn_name,
+		     DEVICE(0x4238), SUBDEV(0x1118)),
+	IWL_DEV_INFO(iwl6000i_2agn_cfg, iwl6000i_2agn_name,
+		     DEVICE(0x4239), SUBDEV(0x1311)),
+	IWL_DEV_INFO(iwl6000i_non_n_cfg, iwl6000i_2abg_name,
+		     DEVICE(0x4239), SUBDEV(0x1316)),
-
-	}
+/* 6x05 Series */
+	IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_name,
+		     DEVICE(0x0082), SUBDEV_MASKED(0x1, 0xF)),
+	IWL_DEV_INFO(iwl6005_non_n_cfg, iwl6005_2abg_name,
+		     DEVICE(0x0082), SUBDEV_MASKED(0x6, 0xF)),
+	IWL_DEV_INFO(iwl6005_non_n_cfg, iwl6005_2bg_name,
+		     DEVICE(0x0082), SUBDEV_MASKED(0x7, 0xF)),
+	IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_name,
+		     DEVICE(0x0082), SUBDEV_MASKED(0x8, 0xF)),
+
+	IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_name,
+		     DEVICE(0x0085), SUBDEV_MASKED(0x1, 0xF)),
+	IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_name,
+		     DEVICE(0x0085), SUBDEV_MASKED(0x8, 0xF)),
+	IWL_DEV_INFO(iwl6005_non_n_cfg, iwl6005_2abg_name,
+		     DEVICE(0x0085), SUBDEV_MASKED(0x6, 0xF)),
+
+	IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_sff_name,
+		     DEVICE(0x0082), SUBDEV_MASKED(0xC000, 0xF000)),
+	IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_sff_name,
+		     DEVICE(0x0085), SUBDEV_MASKED(0xC000, 0xF000)),
+	IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_d_name,
+		     DEVICE(0x0082), SUBDEV(0x4820)),
+	IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_mow1_name,
+		     DEVICE(0x0082), SUBDEV(0x1304)),/* low 5GHz active */
+	IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_mow2_name,
+		     DEVICE(0x0082), SUBDEV(0x1305)),/* high 5GHz active */
-
-	/* Set Step-id */
-	iwl_trans->hw_rf_id |= (step_id << 8);
+/* 6x30 Series */
+	IWL_DEV_INFO(iwl6030_n_cfg, iwl1030_bgn_name,
+		     DEVICE(0x008A), SUBDEV_MASKED(0x5, 0xF)),
+	IWL_DEV_INFO(iwl6030_non_n_cfg, iwl1030_bg_name,
+		     DEVICE(0x008A), SUBDEV_MASKED(0x7, 0xF)),
+	IWL_DEV_INFO(iwl6030_n_cfg, iwl1030_bgn_name,
+		     DEVICE(0x008B), SUBDEV(0x5315)),
+	IWL_DEV_INFO(iwl6030_non_n_cfg, iwl1030_bg_name,
+		     DEVICE(0x008B), SUBDEV(0x5317)),
+	IWL_DEV_INFO(iwl6030_n_cfg, iwl6030_2agn_name,
+		     DEVICE(0x0090), SUBDEV(0x5211)),
+	IWL_DEV_INFO(iwl6030_n_cfg, iwl6030_2bgn_name,
+		     DEVICE(0x0090), SUBDEV(0x5215)),
+	IWL_DEV_INFO(iwl6030_non_n_cfg, iwl6030_2abg_name,
+		     DEVICE(0x0090), SUBDEV(0x5216)),
+	IWL_DEV_INFO(iwl6030_n_cfg, iwl6030_2agn_name,
+		     DEVICE(0x0091), SUBDEV_MASKED(0x1, 0xF)),
+	IWL_DEV_INFO(iwl6030_n_cfg, iwl6030_2bgn_name,
+		     DEVICE(0x0091), SUBDEV_MASKED(0x5, 0xF)),
+	IWL_DEV_INFO(iwl6030_non_n_cfg, iwl6030_2abg_name,
+		     DEVICE(0x0091), SUBDEV_MASKED(0x6, 0xF)),
+	IWL_DEV_INFO(iwl6030_non_n_cfg, iwl6030_2bg_name,
+		     DEVICE(0x0091), SUBDEV(0x5207)),
-
-	/* Set CDB capabilities */
-	if (cdb_id_wfpm || slave_id) {
-		iwl_trans->hw_rf_id += BIT(28);
-		IWL_INFO(iwl_trans, "Adding cdb to rf id\n");
-	}
+/* 6x50 WiFi/WiMax Series */
+	IWL_DEV_INFO(iwl6050_2agn_cfg, iwl6050_2agn_name,
+		     DEVICE(0x0087), SUBDEV_MASKED(0x1, 0xF)),
+	IWL_DEV_INFO(iwl6050_2abg_cfg, iwl6050_2abg_name,
+		     DEVICE(0x0087), SUBDEV_MASKED(0x6, 0xF)),
+	IWL_DEV_INFO(iwl6050_2agn_cfg, iwl6050_2agn_name,
+		     DEVICE(0x0089), SUBDEV(0x1311)),
+	IWL_DEV_INFO(iwl6050_2abg_cfg, iwl6050_2abg_name,
+		     DEVICE(0x0089), SUBDEV(0x1316)),
-
-	/* Set Jacket capabilities */
-	if (jacket_id_wfpm || jacket_id_cnv) {
-		iwl_trans->hw_rf_id += BIT(29);
-		IWL_INFO(iwl_trans, "Adding jacket to rf id\n");
-	}
+/* 6150 WiFi/WiMax Series */
+	IWL_DEV_INFO(iwl6150_bgn_cfg, iwl6150_bgn_name,
+		     DEVICE(0x0885), SUBDEV_MASKED(0x5, 0xF)),
+	IWL_DEV_INFO(iwl6150_bg_cfg, iwl6150_bg_name,
+		     DEVICE(0x0885), SUBDEV_MASKED(0x7, 0xF)),
+	IWL_DEV_INFO(iwl6150_bgn_cfg, iwl6150_bgn_name,
+		     DEVICE(0x0886), SUBDEV(0x1315)),
+	IWL_DEV_INFO(iwl6150_bg_cfg, iwl6150_bg_name,
+		     DEVICE(0x0886), SUBDEV(0x1317)),
-
-	IWL_INFO(iwl_trans,
-		 "Detected rf-type 0x%x step-id 0x%x slave-id 0x%x from crf id 0x%x\n",
-		 REG_CRF_ID_TYPE(val), step_id, slave_id, iwl_trans->hw_rf_id);
-	IWL_INFO(iwl_trans,
-		 "Detected cdb-id 0x%x jacket-id 0x%x from wfpm id 0x%x\n",
-		 cdb_id_wfpm, jacket_id_wfpm, iwl_trans->hw_wfpm_id);
-	IWL_INFO(iwl_trans, "Detected jacket-id 0x%x from cnvi id 0x%x\n",
-		 jacket_id_cnv, iwl_trans->hw_cnv_id);
+/* 1000 Series WiFi */
+	IWL_DEV_INFO(iwl1000_bgn_cfg, iwl1000_bgn_name,
+		     DEVICE(0x0083), SUBDEV_MASKED(0x5, 0xF)),
+	IWL_DEV_INFO(iwl1000_bg_cfg, iwl1000_bg_name,
+		     DEVICE(0x0083), SUBDEV_MASKED(0x6, 0xF)),
+	IWL_DEV_INFO(iwl1000_bgn_cfg, iwl1000_bgn_name,
+		     DEVICE(0x0084), SUBDEV_MASKED(0x5, 0xF)),
+	IWL_DEV_INFO(iwl1000_bg_cfg, iwl1000_bg_name,
+		     DEVICE(0x0084), SUBDEV_MASKED(0x6, 0xF)),
-
-out:
-	return ret;
-}
+/* 100 Series WiFi */
+	IWL_DEV_INFO(iwl100_bgn_cfg, iwl100_bgn_name,
+		     DEVICE(0x08AE), SUBDEV_MASKED(0x5, 0xF)),
+	IWL_DEV_INFO(iwl100_bg_cfg, iwl100_bg_name,
+		     DEVICE(0x08AE), SUBDEV_MASKED(0x7, 0xF)),
+	IWL_DEV_INFO(iwl100_bgn_cfg, iwl100_bgn_name,
+		     DEVICE(0x08AF), SUBDEV(0x1015)),
+	IWL_DEV_INFO(iwl100_bg_cfg, iwl100_bg_name,
+		     DEVICE(0x08AF), SUBDEV(0x1017)),
+
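
[Editor's note — not part of the diff] The removed get_crf_id()/map_crf_id() pair above synthesized hw_rf_id for parts without OTP. From its assignments, the layout is: RF type from REG_CRF_ID_TYPE() at bit 12, the silicon step at bit 8, CDB presence (WFPM OTP or CRF slave id) at bit 28, and jacket presence (WFPM or CNVI) at bit 29. A compact restatement of that composition (the function name is ours; field widths beyond what the removed code shows are not implied):

	#include <linux/bits.h>
	#include <linux/types.h>

	/* Mirror of the removed logic: the original used '+= BIT(28)', which
	 * is equivalent to the OR below since those bits start out clear. */
	static u32 iwl_compose_rf_id(u32 rf_type, u32 step_id, bool cdb, bool jacket)
	{
		u32 rf_id = rf_type << 12;	/* e.g. IWL_CFG_RF_TYPE_GF */

		rf_id |= step_id << 8;
		if (cdb)
			rf_id |= BIT(28);
		if (jacket)
			rf_id |= BIT(29);

		return rf_id;
	}
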
+/* 130 Series WiFi */
+	IWL_DEV_INFO(iwl130_bgn_cfg, iwl130_bgn_name,
+		     DEVICE(0x0896), SUBDEV_MASKED(0x5, 0xF)),
+	IWL_DEV_INFO(iwl130_bg_cfg, iwl130_bg_name,
+		     DEVICE(0x0896), SUBDEV_MASKED(0x7, 0xF)),
+	IWL_DEV_INFO(iwl130_bgn_cfg, iwl130_bgn_name,
+		     DEVICE(0x0897), SUBDEV(0x5015)),
+	IWL_DEV_INFO(iwl130_bg_cfg, iwl130_bg_name,
+		     DEVICE(0x0897), SUBDEV(0x5017)),
+
+/* 2x00 Series */
+	IWL_DEV_INFO(iwl2000_2bgn_cfg, iwl2000_2bgn_name,
+		     DEVICE(0x0890), SUBDEV(0x4022)),
+	IWL_DEV_INFO(iwl2000_2bgn_cfg, iwl2000_2bgn_name,
+		     DEVICE(0x0891), SUBDEV(0x4222)),
+	IWL_DEV_INFO(iwl2000_2bgn_cfg, iwl2000_2bgn_name,
+		     DEVICE(0x0890), SUBDEV(0x4422)),
+	IWL_DEV_INFO(iwl2000_2bgn_cfg, iwl2000_2bgn_d_name,
+		     DEVICE(0x0890), SUBDEV(0x4822)),
+
+/* 2x30 Series */
+	IWL_DEV_INFO(iwl2030_2bgn_cfg, iwl2030_2bgn_name,
+		     DEVICE(0x0887)),
+	IWL_DEV_INFO(iwl2030_2bgn_cfg, iwl2030_2bgn_name,
+		     DEVICE(0x0888), SUBDEV(0x4262)),
+
+/* 6x35 Series */
+	IWL_DEV_INFO(iwl6035_2agn_cfg, iwl6035_2agn_name,
+		     DEVICE(0x088E), SUBDEV_MASKED(0x0, 0xF)),
+	IWL_DEV_INFO(iwl6035_2agn_cfg, iwl6035_2agn_sff_name,
+		     DEVICE(0x088E), SUBDEV_MASKED(0xA, 0xF)),
+	IWL_DEV_INFO(iwl6035_2agn_cfg, iwl6035_2agn_name,
+		     DEVICE(0x088F), SUBDEV_MASKED(0x0, 0xF)),
+	IWL_DEV_INFO(iwl6035_2agn_cfg, iwl6035_2agn_sff_name,
+		     DEVICE(0x088F), SUBDEV_MASKED(0xA, 0xF)),
+
+/* 105 Series */
+	IWL_DEV_INFO(iwl105_bgn_cfg, iwl105_bgn_name,
+		     DEVICE(0x0894)),
+	IWL_DEV_INFO(iwl105_bgn_cfg, iwl105_bgn_name,
+		     DEVICE(0x0895), SUBDEV(0x0222)),
+
+/* 135 Series */
+	IWL_DEV_INFO(iwl135_bgn_cfg, iwl135_bgn_name,
+		     DEVICE(0x0892)),
+	IWL_DEV_INFO(iwl135_bgn_cfg, iwl135_bgn_name,
+		     DEVICE(0x0893), SUBDEV(0x0262)),
+#endif /* CONFIG_IWLDVM */
+
+#if IS_ENABLED(CONFIG_IWLMVM)
+/* 7260 Series */
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name,
+		     DEVICE(0x08B1)), // unlisted ones fall through to here
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B1), SUBDEV(0x4060)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B1), SUBDEV(0x406A)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B1), SUBDEV(0x4160)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+		     DEVICE(0x08B1), SUBDEV(0x4062)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+		     DEVICE(0x08B1), SUBDEV(0x4162)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B1), SUBDEV(0x4460)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B1), SUBDEV(0x446A)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+		     DEVICE(0x08B1), SUBDEV(0x4462)),
+	IWL_DEV_INFO(iwl7260_high_temp_cfg, iwl7260_2ac_name,
+		     DEVICE(0x08B1), SUBDEV(0x4A70)),
+	IWL_DEV_INFO(iwl7260_high_temp_cfg, iwl7260_2ac_name,
+		     DEVICE(0x08B1), SUBDEV(0x4A6E)),
+	IWL_DEV_INFO(iwl7260_high_temp_cfg, iwl7260_2ac_name,
+		     DEVICE(0x08B1), SUBDEV(0x4A6C)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B1), SUBDEV(0x4560)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B1), SUBDEV(0x4020)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B1), SUBDEV(0x402A)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B1), SUBDEV(0x4420)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B1), SUBDEV(0xC060)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B1), SUBDEV(0xC06A)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B1), SUBDEV(0xC160)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+		     DEVICE(0x08B1), SUBDEV(0xC062)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+		     DEVICE(0x08B1), SUBDEV(0xC162)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B1), SUBDEV(0xC760)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B1), SUBDEV(0xC460)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+		     DEVICE(0x08B1), SUBDEV(0xC462)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B1), SUBDEV(0xC560)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B1), SUBDEV(0xC360)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B1), SUBDEV(0xC020)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B1), SUBDEV(0xC02A)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B1), SUBDEV(0xC420)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name,
+		     DEVICE(0x08B2), SUBDEV(0x4270)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name,
+		     DEVICE(0x08B2), SUBDEV(0x4272)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B2), SUBDEV(0x4260)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B2), SUBDEV(0x426A)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+		     DEVICE(0x08B2), SUBDEV(0x4262)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name,
+		     DEVICE(0x08B2), SUBDEV(0x4370)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B2), SUBDEV(0x4360)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B2), SUBDEV(0x4220)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name,
+		     DEVICE(0x08B2), SUBDEV(0xC270)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name,
+		     DEVICE(0x08B2), SUBDEV(0xC272)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B2), SUBDEV(0xC260)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+		     DEVICE(0x08B2), SUBDEV(0xC26A)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+		     DEVICE(0x08B2), SUBDEV(0xC262)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name,
+		     DEVICE(0x08B2), SUBDEV(0xC370)),
+	IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+		     DEVICE(0x08B2), SUBDEV(0xC220)),
+
+/* 3160 Series */
+	IWL_DEV_INFO(iwl3160_cfg, iwl3160_2ac_name,
+		     DEVICE(0x08B3)),
+
+	IWL_DEV_INFO(iwl3160_cfg, iwl3160_n_name,
+		     DEVICE(0x08B3), SUBDEV_MASKED(0x62, 0xFF)),
+	IWL_DEV_INFO(iwl3160_cfg, iwl3160_2n_name,
+		     DEVICE(0x08B3), SUBDEV_MASKED(0x60, 0xFF)),
+
+	IWL_DEV_INFO(iwl3160_cfg, iwl3160_2ac_name,
+		     DEVICE(0x08B4)),
+
+/* 3165 Series */
+	IWL_DEV_INFO(iwl3165_2ac_cfg, iwl3165_2ac_name,
+		     DEVICE(0x3165)),
+	IWL_DEV_INFO(iwl3165_2ac_cfg, iwl3165_2ac_name,
+		     DEVICE(0x3166)),
+
+/* 3168 Series */
+	IWL_DEV_INFO(iwl3168_2ac_cfg, iwl3168_2ac_name,
+		     DEVICE(0x24FB)),
+
+/* 7265 Series */
+	IWL_DEV_INFO(iwl7265_cfg, iwl7265_2ac_name,
+		     DEVICE(0x095A)),
+	IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+		     DEVICE(0x095A), SUBDEV(0x5000)),
+	IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+		     DEVICE(0x095A), SUBDEV(0x500A)),
+	IWL_DEV_INFO(iwl7265_cfg, iwl7265_n_name,
+		     DEVICE(0x095A), SUBDEV(0x5002)),
+	IWL_DEV_INFO(iwl7265_cfg, iwl7265_n_name,
+		     DEVICE(0x095A), SUBDEV(0x5102)),
+	IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+		     DEVICE(0x095A), SUBDEV(0x5020)),
+	IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+		     DEVICE(0x095A), SUBDEV(0x502A)),
+	IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+		     DEVICE(0x095A), SUBDEV(0x5090)),
+	IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+		     DEVICE(0x095A), SUBDEV(0x5190)),
+	IWL_DEV_INFO(iwl7265_cfg,
iwl7265_2n_name, +		     DEVICE(0x095A), SUBDEV(0x5100)), +	IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, +		     DEVICE(0x095A), SUBDEV(0x5400)), +	IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, +		     DEVICE(0x095A), SUBDEV(0x5420)), +	IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, +		     DEVICE(0x095A), SUBDEV(0x5490)), +	IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, +		     DEVICE(0x095A), SUBDEV(0x5C10)), +	IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, +		     DEVICE(0x095A), SUBDEV(0x5590)), +	IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, +		     DEVICE(0x095A), SUBDEV(0x9000)), +	IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, +		     DEVICE(0x095A), SUBDEV(0x900A)), +	IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, +		     DEVICE(0x095A), SUBDEV(0x9400)), + +	IWL_DEV_INFO(iwl7265_cfg, iwl7265_2ac_name, +		     DEVICE(0x095B)), +	IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, +		     DEVICE(0x095B), SUBDEV(0x520A)), +	IWL_DEV_INFO(iwl7265_cfg, iwl7265_n_name, +		     DEVICE(0x095B), SUBDEV(0x5302)), +	IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, +		     DEVICE(0x095B), SUBDEV(0x5200)), +	IWL_DEV_INFO(iwl7265_cfg, iwl7265_n_name, +		     DEVICE(0x095B), SUBDEV(0x5202)), +	IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, +		     DEVICE(0x095B), SUBDEV(0x9200)), + +/* 8000 Series */ +	IWL_DEV_INFO(iwl8260_cfg, iwl8260_2ac_name, +		     DEVICE(0x24F3)), +	IWL_DEV_INFO(iwl8260_cfg, iwl8260_2n_name, +		     DEVICE(0x24F3), SUBDEV(0x0004)), +	IWL_DEV_INFO(iwl8260_cfg, iwl8260_2n_name, +		     DEVICE(0x24F3), SUBDEV(0x0044)), +	IWL_DEV_INFO(iwl8260_cfg, iwl8260_2ac_name, +		     DEVICE(0x24F4)), +	IWL_DEV_INFO(iwl8260_cfg, iwl4165_2ac_name, +		     DEVICE(0x24F5)), +	IWL_DEV_INFO(iwl8260_cfg, iwl4165_2ac_name, +		     DEVICE(0x24F6)), +	IWL_DEV_INFO(iwl8265_cfg, iwl8265_2ac_name, +		     DEVICE(0x24FD)), +	IWL_DEV_INFO(iwl8265_cfg, iwl8275_2ac_name, +		     DEVICE(0x24FD), SUBDEV(0x3E02)), +	IWL_DEV_INFO(iwl8265_cfg, iwl8275_2ac_name, +		     DEVICE(0x24FD), SUBDEV(0x3E01)), +	IWL_DEV_INFO(iwl8265_cfg, iwl8275_2ac_name, +		     DEVICE(0x24FD), SUBDEV(0x1012)), +	IWL_DEV_INFO(iwl8265_cfg, iwl8275_2ac_name, +		     DEVICE(0x24FD), SUBDEV(0x0012)), +	IWL_DEV_INFO(iwl8265_cfg, iwl_killer_1435i_name, +		     DEVICE(0x24FD), SUBDEV(0x1431)), +	IWL_DEV_INFO(iwl8265_cfg, iwl_killer_1434_kix_name, +		     DEVICE(0x24FD), SUBDEV(0x1432)), + +/* JF1 RF */ +	IWL_DEV_INFO(iwl_rf_jf, iwl9461_160_name, +		     RF_TYPE(JF1)), +	IWL_DEV_INFO(iwl_rf_jf_80mhz, iwl9461_name, +		     RF_TYPE(JF1), BW_LIMITED), +	IWL_DEV_INFO(iwl_rf_jf, iwl9462_160_name, +		     RF_TYPE(JF1), RF_ID(JF1_DIV)), +	IWL_DEV_INFO(iwl_rf_jf_80mhz, iwl9462_name, +		     RF_TYPE(JF1), RF_ID(JF1_DIV), BW_LIMITED), +/* JF2 RF */ +	IWL_DEV_INFO(iwl_rf_jf, iwl9260_160_name, +		     RF_TYPE(JF2)), +	IWL_DEV_INFO(iwl_rf_jf_80mhz, iwl9260_name, +		     RF_TYPE(JF2), BW_LIMITED), +	IWL_DEV_INFO(iwl_rf_jf, iwl9560_160_name, +		     RF_TYPE(JF2), RF_ID(JF)), +	IWL_DEV_INFO(iwl_rf_jf_80mhz, iwl9560_name, +		     RF_TYPE(JF2), RF_ID(JF), BW_LIMITED), + +/* HR RF */ +	IWL_DEV_INFO(iwl_rf_hr, iwl_ax201_name, RF_TYPE(HR2)), +	IWL_DEV_INFO(iwl_rf_hr_80mhz, iwl_ax101_name, RF_TYPE(HR1)), +	IWL_DEV_INFO(iwl_rf_hr_80mhz, iwl_ax203_name, RF_TYPE(HR2), BW_LIMITED), +	IWL_DEV_INFO(iwl_rf_hr, iwl_ax200_name, DEVICE(0x2723)), + +/* GF RF */ +	IWL_DEV_INFO(iwl_rf_gf, iwl_ax211_name, RF_TYPE(GF)), +	IWL_DEV_INFO(iwl_rf_gf, iwl_ax411_name, RF_TYPE(GF), CDB), +	IWL_DEV_INFO(iwl_rf_gf, iwl_ax210_name, DEVICE(0x2725)), + +/* Killer CRFs */ +	IWL_DEV_INFO(iwl_rf_jf, iwl9260_killer_1550_name, 
SUBDEV(0x1550)), +	IWL_DEV_INFO(iwl_rf_jf, iwl9560_killer_1550s_name, SUBDEV(0x1551)), +	IWL_DEV_INFO(iwl_rf_jf, iwl9560_killer_1550i_name, SUBDEV(0x1552)), + +	IWL_DEV_INFO(iwl_rf_hr, iwl_ax201_killer_1650s_name, SUBDEV(0x1651)), +	IWL_DEV_INFO(iwl_rf_hr, iwl_ax201_killer_1650i_name, SUBDEV(0x1652)), + +	IWL_DEV_INFO(iwl_rf_gf, iwl_ax211_killer_1675s_name, SUBDEV(0x1671)), +	IWL_DEV_INFO(iwl_rf_gf, iwl_ax211_killer_1675i_name, SUBDEV(0x1672)), +	IWL_DEV_INFO(iwl_rf_gf, iwl_ax210_killer_1675w_name, SUBDEV(0x1673)), +	IWL_DEV_INFO(iwl_rf_gf, iwl_ax210_killer_1675x_name, SUBDEV(0x1674)), +	IWL_DEV_INFO(iwl_rf_gf, iwl_ax411_killer_1690s_name, SUBDEV(0x1691)), +	IWL_DEV_INFO(iwl_rf_gf, iwl_ax411_killer_1690i_name, SUBDEV(0x1692)), + +/* Killer discrete */ +	IWL_DEV_INFO(iwl_rf_hr, iwl_ax200_killer_1650w_name, +		     DEVICE(0x2723), SUBDEV(0x1653)), +	IWL_DEV_INFO(iwl_rf_hr, iwl_ax200_killer_1650x_name, +		     DEVICE(0x2723), SUBDEV(0x1654)), +#endif /* CONFIG_IWLMVM */ +#if IS_ENABLED(CONFIG_IWLMLD) +/* FM RF */ +	IWL_DEV_INFO(iwl_rf_fm, iwl_be201_name, RF_TYPE(FM)), +	IWL_DEV_INFO(iwl_rf_fm, iwl_be401_name, RF_TYPE(FM), CDB), +	IWL_DEV_INFO(iwl_rf_fm, iwl_be200_name, RF_TYPE(FM), +		     DEVICE(0x272B), DISCRETE), +	IWL_DEV_INFO(iwl_rf_fm_160mhz, iwl_be202_name, +		     RF_TYPE(FM), BW_LIMITED), + +/* Killer CRFs */ +	IWL_DEV_INFO(iwl_rf_fm, iwl_killer_be1750s_name, SUBDEV(0x1771)), +	IWL_DEV_INFO(iwl_rf_fm, iwl_killer_be1750i_name, SUBDEV(0x1772)), +	IWL_DEV_INFO(iwl_rf_fm, iwl_killer_be1790s_name, SUBDEV(0x1791)), +	IWL_DEV_INFO(iwl_rf_fm, iwl_killer_be1790i_name, SUBDEV(0x1792)), + +/* Killer discrete */ +	IWL_DEV_INFO(iwl_rf_fm, iwl_killer_be1750w_name, +		     DEVICE(0x272B), SUBDEV(0x1773)), +	IWL_DEV_INFO(iwl_rf_fm, iwl_killer_be1750x_name, +		     DEVICE(0x272B), SUBDEV(0x1774)), + +/* WH RF */ +	IWL_DEV_INFO(iwl_rf_wh, iwl_be211_name, RF_TYPE(WH)), +	IWL_DEV_INFO(iwl_rf_wh_160mhz, iwl_be213_name, RF_TYPE(WH), BW_LIMITED), + +/* PE RF */ +	IWL_DEV_INFO(iwl_rf_pe, iwl_bn201_name, RF_TYPE(PE)), +	IWL_DEV_INFO(iwl_rf_pe, iwl_be223_name, RF_TYPE(PE), SUBDEV(0x0524)), +	IWL_DEV_INFO(iwl_rf_pe, iwl_be221_name, RF_TYPE(PE), SUBDEV(0x0324)), + +/* Killer */ +	IWL_DEV_INFO(iwl_rf_wh, iwl_killer_be1775s_name, SUBDEV(0x1776)), +	IWL_DEV_INFO(iwl_rf_wh, iwl_killer_be1775i_name, SUBDEV(0x1775)), + +	IWL_DEV_INFO(iwl_rf_pe, iwl_killer_bn1850w2_name, SUBDEV(0x1851)), +	IWL_DEV_INFO(iwl_rf_pe, iwl_killer_bn1850i_name, SUBDEV(0x1852)), +#endif /* CONFIG_IWLMLD */ +}; +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table); + +#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS) +const unsigned int iwl_dev_info_table_size = ARRAY_SIZE(iwl_dev_info_table); +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table_size); +#endif  /* PCI registers */  #define PCI_CFG_RETRY_TIMEOUT	0x041 -VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info * -iwl_pci_find_dev_info(u16 device, u16 subsystem_device, -		      u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb, -		      u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step) +const struct iwl_dev_info * +iwl_pci_find_dev_info(u16 device, u16 subsystem_device, u16 rf_type, u8 cdb, +		      u8 rf_id, u8 bw_limit, bool discrete)  {  	int num_devices = ARRAY_SIZE(iwl_dev_info_table);  	int i; @@ -1309,49 +1105,32 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device,  	for (i = num_devices - 1; i >= 0; i--) {  		const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i]; +		u16 subdevice_mask;  		if (dev_info->device != (u16)IWL_CFG_ANY &&  		    dev_info->device != device)  			
continue; -		if (dev_info->subdevice != (u16)IWL_CFG_ANY && -		    dev_info->subdevice != subsystem_device) -			continue; - -		if (dev_info->mac_type != (u16)IWL_CFG_ANY && -		    dev_info->mac_type != mac_type) -			continue; - -		if (dev_info->mac_step != (u8)IWL_CFG_ANY && -		    dev_info->mac_step != mac_step) -			continue; - -		if (dev_info->rf_type != (u16)IWL_CFG_ANY && -		    dev_info->rf_type != rf_type) -			continue; +		subdevice_mask = GENMASK(dev_info->subdevice_m_h, +					 dev_info->subdevice_m_l); -		if (dev_info->cdb != (u8)IWL_CFG_ANY && -		    dev_info->cdb != cdb) +		if (dev_info->subdevice != (u16)IWL_CFG_ANY && +		    dev_info->subdevice != (subsystem_device & subdevice_mask))  			continue; -		if (dev_info->jacket != (u8)IWL_CFG_ANY && -		    dev_info->jacket != jacket) +		if (dev_info->match_rf_type && dev_info->rf_type != rf_type)  			continue; -		if (dev_info->rf_id != (u8)IWL_CFG_ANY && -		    dev_info->rf_id != rf_id) +		if (dev_info->match_cdb && dev_info->cdb != cdb)  			continue; -		if (dev_info->no_160 != (u8)IWL_CFG_ANY && -		    dev_info->no_160 != no_160) +		if (dev_info->match_rf_id && dev_info->rf_id != rf_id)  			continue; -		if (dev_info->cores != (u8)IWL_CFG_ANY && -		    dev_info->cores != cores) +		if (dev_info->match_bw_limit && dev_info->bw_limit != bw_limit)  			continue; -		if (dev_info->rf_step != (u8)IWL_CFG_ANY && -		    dev_info->rf_step != rf_step) +		if (dev_info->match_discrete && dev_info->discrete != discrete)  			continue;  		return dev_info; @@ -1363,206 +1142,65 @@ EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_pci_find_dev_info);  static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  { -	const struct iwl_cfg_trans_params *trans; -	const struct iwl_cfg *cfg_7265d __maybe_unused = NULL; -	const struct iwl_dev_info *dev_info; -	struct iwl_trans *iwl_trans; -	struct iwl_trans_pcie *trans_pcie; +	const struct iwl_mac_cfg *mac_cfg = (void *)ent->driver_data; +	u8 __iomem *hw_base; +	u32 bar0, hw_rev;  	int ret; -	const struct iwl_cfg *cfg; - -	trans = (void *)(ent->driver_data & ~TRANS_CFG_MARKER); - -	/* -	 * This is needed for backwards compatibility with the old -	 * tables, so we don't need to change all the config structs -	 * at the same time.  The cfg is used to compare with the old -	 * full cfg structs. -	 */ -	cfg = (void *)(ent->driver_data & ~TRANS_CFG_MARKER); -	/* make sure trans is the first element in iwl_cfg */ -	BUILD_BUG_ON(offsetof(struct iwl_cfg, trans)); - -	iwl_trans = iwl_trans_pcie_alloc(pdev, ent, trans); -	if (IS_ERR(iwl_trans)) -		return PTR_ERR(iwl_trans); - -	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans); - -	/* -	 * Let's try to grab NIC access early here. Sometimes, NICs may -	 * fail to initialize, and if that happens it's better if we see -	 * issues early on (and can reprobe, per the logic inside), than -	 * first trying to load the firmware etc. and potentially only -	 * detecting any problems when the first interface is brought up. 
-	 */ -	ret = iwl_pcie_prepare_card_hw(iwl_trans); -	if (!ret) { -		ret = iwl_finish_nic_init(iwl_trans); +	/* reassign our BAR 0 if invalid due to possible runtime PM races */ +	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &bar0); +	if (bar0 == PCI_BASE_ADDRESS_MEM_TYPE_64) { +		ret = pci_assign_resource(pdev, 0);  		if (ret) -			goto out_free_trans; -		if (iwl_trans_grab_nic_access(iwl_trans)) { -			get_crf_id(iwl_trans); -			/* all good */ -			iwl_trans_release_nic_access(iwl_trans); -		} else { -			ret = -EIO; -			goto out_free_trans; -		} -	} - -	iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID); - -	/* -	 * The RF_ID is set to zero in blank OTP so read version to -	 * extract the RF_ID. -	 * This is relevant only for family 9000 and up. -	 */ -	if (iwl_trans->trans_cfg->rf_id && -	    iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000 && -	    !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && map_crf_id(iwl_trans)) { -		ret = -EINVAL; -		goto out_free_trans; +			return ret;  	} -	IWL_INFO(iwl_trans, "PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n", -		 pdev->device, pdev->subsystem_device, -		 iwl_trans->hw_rev, iwl_trans->hw_rf_id); - -	dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device, -					 CSR_HW_REV_TYPE(iwl_trans->hw_rev), -					 iwl_trans->hw_rev_step, -					 CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id), -					 CSR_HW_RFID_IS_CDB(iwl_trans->hw_rf_id), -					 CSR_HW_RFID_IS_JACKET(iwl_trans->hw_rf_id), -					 IWL_SUBDEVICE_RF_ID(pdev->subsystem_device), -					 IWL_SUBDEVICE_NO_160(pdev->subsystem_device), -					 IWL_SUBDEVICE_CORES(pdev->subsystem_device), -					 CSR_HW_RFID_STEP(iwl_trans->hw_rf_id)); -	if (dev_info) { -		iwl_trans->cfg = dev_info->cfg; -		iwl_trans->name = dev_info->name; -		iwl_trans->no_160 = dev_info->no_160 == IWL_CFG_NO_160; -	} - -#if IS_ENABLED(CONFIG_IWLMVM) -	/* -	 * special-case 7265D, it has the same PCI IDs. -	 * -	 * Note that because we already pass the cfg to the transport above, -	 * all the parameters that the transport uses must, until that is -	 * changed, be identical to the ones in the 7265D configuration. -	 */ -	if (cfg == &iwl7265_2ac_cfg) -		cfg_7265d = &iwl7265d_2ac_cfg; -	else if (cfg == &iwl7265_2n_cfg) -		cfg_7265d = &iwl7265d_2n_cfg; -	else if (cfg == &iwl7265_n_cfg) -		cfg_7265d = &iwl7265d_n_cfg; -	if (cfg_7265d && -	    (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) -		iwl_trans->cfg = cfg_7265d; +	ret = pcim_enable_device(pdev); +	if (ret) +		return ret; -	/* -	 * This is a hack to switch from Qu B0 to Qu C0.  We need to -	 * do this for all cfgs that use Qu B0, except for those using -	 * Jf, which have already been moved to the new table.  The -	 * rest must be removed once we convert Qu with Hr as well. -	 */ -	if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_C0) { -		if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) -			iwl_trans->cfg = &iwl_ax201_cfg_qu_c0_hr_b0; -		else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0) -			iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0; -		else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0) -			iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0; -	} +	pci_set_master(pdev); -	/* same thing for QuZ... 
*/ -	if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) { -		if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) -			iwl_trans->cfg = &iwl_ax201_cfg_quz_hr; -		else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0) -			iwl_trans->cfg = &iwl_ax1650s_cfg_quz_hr; -		else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0) -			iwl_trans->cfg = &iwl_ax1650i_cfg_quz_hr; +	ret = pcim_request_all_regions(pdev, DRV_NAME); +	if (ret) { +		dev_err(&pdev->dev, "Requesting all PCI BARs failed.\n"); +		return ret;  	} +#if defined(__FreeBSD__) +	linuxkpi_pcim_want_to_use_bus_functions(pdev);  #endif -	/* -	 * If we didn't set the cfg yet, the PCI ID table entry should have -	 * been a full config - if yes, use it, otherwise fail. -	 */ -	if (!iwl_trans->cfg) { -		if (ent->driver_data & TRANS_CFG_MARKER) { -			pr_err("No config found for PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n", -			       pdev->device, pdev->subsystem_device, -			       iwl_trans->hw_rev, iwl_trans->hw_rf_id); -			ret = -EINVAL; -			goto out_free_trans; -		} -		iwl_trans->cfg = cfg; -	} - -	/* if we don't have a name yet, copy name from the old cfg */ -	if (!iwl_trans->name) -		iwl_trans->name = iwl_trans->cfg->name; - -	IWL_INFO(iwl_trans, "Detected %s\n", iwl_trans->name); - -	if (iwl_trans->trans_cfg->mq_rx_supported) { -		if (WARN_ON(!iwl_trans->cfg->num_rbds)) { -			ret = -EINVAL; -			goto out_free_trans; -		} -		trans_pcie->num_rx_bufs = iwl_trans->cfg->num_rbds; -	} else { -		trans_pcie->num_rx_bufs = RX_QUEUE_SIZE; -	} - -	if (!iwl_trans->trans_cfg->integrated) { -		u16 link_status; - -		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status); -		iwl_trans->pcie_link_speed = -			u16_get_bits(link_status, PCI_EXP_LNKSTA_CLS); +	hw_base = pcim_iomap(pdev, 0, 0); +	if (!hw_base) { +		dev_err(&pdev->dev, "Failed to map BAR 0.\n"); +		return -ENOMEM;  	} -	ret = iwl_trans_init(iwl_trans); -	if (ret) -		goto out_free_trans; - -	pci_set_drvdata(pdev, iwl_trans); - -	/* try to get ownership so that we'll know if we don't own it */ -	iwl_pcie_prepare_card_hw(iwl_trans); - -	iwl_trans->drv = iwl_drv_start(iwl_trans); - -	if (IS_ERR(iwl_trans->drv)) { -		ret = PTR_ERR(iwl_trans->drv); -		goto out_free_trans; +	/* We can't use iwl_read32 because trans wasn't allocated */ +#if defined(__linux__) +	hw_rev = readl(hw_base + CSR_HW_REV); +#elif defined(__FreeBSD__) +	hw_rev = bus_read_4((struct resource *)hw_base, CSR_HW_REV); +#endif +	if (hw_rev == 0xffffffff) { +		dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n"); +		return -EIO;  	} -	/* register transport layer debugfs here */ -	iwl_trans_pcie_dbgfs_register(iwl_trans); - -	return 0; - -out_free_trans: -	iwl_trans_pcie_free(iwl_trans); -	return ret; +	return iwl_pci_gen1_2_probe(pdev, ent, mac_cfg, hw_base, hw_rev);  }  static void iwl_pci_remove(struct pci_dev *pdev)  {  	struct iwl_trans *trans = pci_get_drvdata(pdev); +	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	if (!trans)  		return; +	cancel_delayed_work_sync(&trans_pcie->me_recheck_wk); +  	iwl_drv_stop(trans->drv);  	iwl_trans_pcie_free(trans); @@ -1605,11 +1243,31 @@ static int _iwl_pci_resume(struct device *device, bool restore)  	 * Scratch value was altered, this means the device was powered off, we  	 * need to reset it completely.  	 * Note: MAC (bits 0:7) will be cleared upon suspend even with wowlan, -	 * so assume that any bits there mean that the device is usable. +	 * but not bits [15:8]. So if we have bits set in lower word, assume +	 * the device is alive. 
+	 * Alternatively, if the scratch value is 0xFFFFFFFF, then we no longer +	 * have access to the device and consider it powered off. +	 * For older devices, just try silently to grab the NIC.  	 */ -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ && -	    !iwl_read32(trans, CSR_FUNC_SCRATCH)) -		device_was_powered_off = true; +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { +		u32 scratch = iwl_read32(trans, CSR_FUNC_SCRATCH); + +		if (!(scratch & CSR_FUNC_SCRATCH_POWER_OFF_MASK) || +		    scratch == ~0U) +			device_was_powered_off = true; +	} else { +		/* +		 * bh are re-enabled by iwl_trans_pcie_release_nic_access, +		 * so re-enable them if _iwl_trans_pcie_grab_nic_access fails. +		 */ +		local_bh_disable(); +		if (_iwl_trans_pcie_grab_nic_access(trans, true)) { +			iwl_trans_pcie_release_nic_access(trans); +		} else { +			device_was_powered_off = true; +			local_bh_enable(); +		} +	}  	if (restore || device_was_powered_off) {  		trans->state = IWL_TRANS_NO_FW; @@ -1668,12 +1326,21 @@ static const struct dev_pm_ops iwl_dev_pm_ops = {  #endif /* CONFIG_PM_SLEEP */ +static void iwl_pci_dump(struct device *device) +{ +	struct pci_dev *pdev = to_pci_dev(device); +	struct iwl_trans *trans = pci_get_drvdata(pdev); + +	iwl_op_mode_dump(trans->op_mode); +} +  static struct pci_driver iwl_pci_driver = {  	.name = DRV_NAME,  	.id_table = iwl_hw_card_ids,  	.probe = iwl_pci_probe,  	.remove = iwl_pci_remove,  	.driver.pm = IWL_PM_OPS, +	.driver.coredump = iwl_pci_dump,  #if defined(__FreeBSD__)  	/* Allow iwm(4) to attach for conflicting IDs for now. */  	.bsd_probe_return = (BUS_PROBE_DEFAULT - 1), @@ -1713,48 +1380,30 @@ sysctl_iwlwifi_pci_ids_name(SYSCTL_HANDLER_ARGS)  	id = iwl_hw_card_ids;  	while (id != NULL && id->vendor != 0) { -		if ((id->driver_data & TRANS_CFG_MARKER) != 0) { -			/* Skip and print them below. 
*/ -			struct iwl_cfg_trans_params *trans; +		if (id->driver_data != 0) { +			const struct iwl_mac_cfg *trans; -			trans = (void *)(id->driver_data & ~TRANS_CFG_MARKER); -			sbuf_printf(sb, "%#06x/%#06x/%#06x/%#06x\t%s\t%s\t%d\t%s\n", +			trans = (void *)id->driver_data; +			sbuf_printf(sb, "%#06x/%#06x/%#06x/%#06x\t%d\t%s\t%d\n",  			    id->vendor, id->device, id->subvendor, id->subdevice, -			    "", "", trans->device_family, -			    iwl_device_family_name(trans->device_family)); - -		} else if (id->driver_data != 0) { -			const struct iwl_cfg *cfg; - -			cfg = (void *)(id->driver_data & ~TRANS_CFG_MARKER); -			sbuf_printf(sb, "%#06x/%#06x/%#06x/%#06x\t%s\t%s\t%d\t%s\n", -			    id->vendor, id->device, id->subvendor, id->subdevice, -			    cfg->name, cfg->fw_name_pre, cfg->trans.device_family, -			    iwl_device_family_name(cfg->trans.device_family)); +			    trans->device_family, +			    iwl_device_family_name(trans->device_family), +			    trans->gen2);  		} else { -			sbuf_printf(sb, "%#06x/%#06x/%#06x/%#06x\t%s\t%s\t%d\t%s\n", +			sbuf_printf(sb, "%#06x/%#06x/%#06x/%#06x\t%d\t%s\t%d\n",  			    id->vendor, id->device, id->subvendor, id->subdevice, -			    "","", IWL_DEVICE_FAMILY_UNDEFINED, -			    iwl_device_family_name(IWL_DEVICE_FAMILY_UNDEFINED)); +			    IWL_DEVICE_FAMILY_UNDEFINED, +			    iwl_device_family_name(IWL_DEVICE_FAMILY_UNDEFINED), -1);  		}  		id++;  	}  	for (i = 0; i < ARRAY_SIZE(iwl_dev_info_table); i++) {  		const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i]; -		const char *name; - -		if (dev_info->name) -			name = dev_info->name; -		else if (dev_info->cfg && dev_info->cfg->name) -			name = dev_info->cfg->name; -		else -			name = ""; -		sbuf_printf(sb, "%#06x/%#06x/%#06x/%#06x\t%s\t%s\t%d\t%s\n", +		sbuf_printf(sb, "%#06x/%#06x/%#06x/%#06x\t%s\t%s\n",  		    PCI_VENDOR_ID_INTEL, dev_info->device, PCI_ANY_ID, dev_info->subdevice, -		    name, dev_info->cfg->fw_name_pre, dev_info->cfg->trans.device_family, -		    iwl_device_family_name(dev_info->cfg->trans.device_family)); +		    dev_info->name, dev_info->cfg->fw_name_pre);  	}  	error = sbuf_finish(sb); diff --git a/sys/contrib/dev/iwlwifi/pcie/internal.h b/sys/contrib/dev/iwlwifi/pcie/gen1_2/internal.h index 27a7e0b5b3d5..f48aeebb151c 100644 --- a/sys/contrib/dev/iwlwifi/pcie/internal.h +++ b/sys/contrib/dev/iwlwifi/pcie/gen1_2/internal.h @@ -1,6 +1,6 @@  /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */  /* - * Copyright (C) 2003-2015, 2018-2024 Intel Corporation + * Copyright (C) 2003-2015, 2018-2025 Intel Corporation   * Copyright (C) 2013-2015 Intel Mobile Communications GmbH   * Copyright (C) 2016-2017 Intel Deutschland GmbH   */ @@ -22,7 +22,7 @@  #include "iwl-io.h"  #include "iwl-op-mode.h"  #include "iwl-drv.h" -#include "iwl-context-info.h" +#include "pcie/iwl-context-info.h"  /*   * RX related structures and functions @@ -39,7 +39,7 @@ struct iwl_host_cmd;   * trans_pcie layer */  /** - * struct iwl_rx_mem_buffer + * struct iwl_rx_mem_buffer - driver-side RX buffer descriptor   * @page_dma: bus address of rxb page   * @page: driver's pointer to the rxb page   * @list: list entry for the membuffer @@ -190,11 +190,12 @@ struct iwl_rb_allocator {   * iwl_get_closed_rb_stts - get closed rb stts from different structs   * @trans: transport pointer (for configuration)   * @rxq: the rxq to get the rb stts from + * Return: last closed RB index   */  static inline u16 iwl_get_closed_rb_stts(struct iwl_trans *trans,  					 struct iwl_rxq *rxq)  { -	if (trans->trans_cfg->device_family >= 
IWL_DEVICE_FAMILY_AX210) { +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {  		__le16 *rb_stts = rxq->rb_stts;  		return le16_to_cpu(READ_ONCE(*rb_stts)); @@ -269,6 +270,7 @@ enum iwl_pcie_fw_reset_state {  	FW_RESET_REQUESTED,  	FW_RESET_OK,  	FW_RESET_ERROR, +	FW_RESET_TOP_REQUESTED,  };  /** @@ -288,22 +290,14 @@ enum iwl_pcie_imr_status {  /**   * struct iwl_pcie_txqs - TX queues data   * - * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes) - * @page_offs: offset from skb->cb to mac header page pointer - * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer   * @queue_used: bit mask of used queues   * @queue_stopped: bit mask of stopped queues   * @txq: array of TXQ data structures representing the TXQs   * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler - * @queue_alloc_cmd_ver: queue allocation command version   * @bc_pool: bytecount DMA allocations pool   * @bc_tbl_size: bytecount table size   * @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO   *	(and similar usage) - * @cmd: command queue data - * @cmd.fifo: FIFO number - * @cmd.q_id: queue ID - * @cmd.wdg_timeout: watchdog timeout   * @tfd: TFD data   * @tfd.max_tbs: max number of buffers per TFD   * @tfd.size: TFD size @@ -315,26 +309,15 @@ struct iwl_pcie_txqs {  	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];  	struct dma_pool *bc_pool;  	size_t bc_tbl_size; -	bool bc_table_dword; -	u8 page_offs; -	u8 dev_cmd_offs;  	struct iwl_tso_hdr_page __percpu *tso_hdr_page;  	struct { -		u8 fifo; -		u8 q_id; -		unsigned int wdg_timeout; -	} cmd; - -	struct {  		u8 max_tbs;  		u16 size;  		u8 addr_size;  	} tfd;  	struct iwl_dma_ptr scd_bc_tbls; - -	u8 queue_alloc_cmd_ver;  };  /** @@ -344,7 +327,7 @@ struct iwl_pcie_txqs {   * @global_table: table mapping received VID from hw to rxb   * @rba: allocator for RX replenishing   * @ctxt_info: context information for FW self init - * @ctxt_info_gen3: context information for gen3 devices + * @ctxt_info_v2: context information for v2 devices   * @prph_info: prph info for self init   * @prph_scratch: prph scratch for self init   * @ctxt_info_dma_addr: dma addr of context information @@ -352,6 +335,7 @@ struct iwl_pcie_txqs {   * @prph_scratch_dma_addr: dma addr of prph scratch   * @ctxt_info_dma_addr: dma addr of context information   * @iml: image loader image virtual address + * @iml_len: image loader image size   * @iml_dma_addr: image loader image DMA address   * @trans: pointer to the generic transport area   * @scd_base_addr: scheduler sram base address in SRAM @@ -363,9 +347,6 @@ struct iwl_pcie_txqs {   * @hw_base: pci hardware address support   * @ucode_write_complete: indicates that the ucode has been copied.   
* @ucode_write_waitq: wait queue for uCode load - * @cmd_queue - command queue number - * @rx_buf_size: Rx buffer size - * @scd_set_active: should the transport configure the SCD for HCMD queue   * @rx_page_order: page order for receive buffer size   * @rx_buf_bytes: RX buffer (RB) size in bytes   * @reg_lock: protect hw register access @@ -402,23 +383,23 @@ struct iwl_pcie_txqs {   * @irq_lock: lock to synchronize IRQ handling   * @txq_memory: TXQ allocation array   * @sx_waitq: waitqueue for Sx transitions - * @sx_complete: completion for Sx transitions - * @pcie_dbg_dumped_once: indicates PCIe regs were dumped already + * @sx_state: state tracking Sx transitions   * @opmode_down: indicates opmode went away   * @num_rx_bufs: number of RX buffers to allocate/use - * @no_reclaim_cmds: special commands not using reclaim flow - *	(firmware workaround) - * @n_no_reclaim_cmds: number of special commands not using reclaim flow   * @affinity_mask: IRQ affinity mask for each RX queue   * @debug_rfkill: RF-kill debugging state, -1 for unset, 0/1 for radio   *	enable/disable - * @fw_reset_handshake: indicates FW reset handshake is needed   * @fw_reset_state: state of FW reset handshake   * @fw_reset_waitq: waitqueue for FW reset handshake   * @is_down: indicates the NIC is down   * @isr_stats: interrupt statistics   * @napi_dev: (fake) netdev for NAPI registration   * @txqs: transport tx queues data. + * @me_present: WiAMT/CSME is detected as present (1), not present (0) + *	or unknown (-1, so can still use it as a boolean safely) + * @me_recheck_wk: worker to recheck WiAMT/CSME presence + * @invalid_tx_cmd: invalid TX command buffer + * @wait_command_queue: wait queue for sync commands   */  struct iwl_trans_pcie {  	struct iwl_rxq *rxq; @@ -427,11 +408,12 @@ struct iwl_trans_pcie {  	struct iwl_rb_allocator rba;  	union {  		struct iwl_context_info *ctxt_info; -		struct iwl_context_info_gen3 *ctxt_info_gen3; +		struct iwl_context_info_v2 *ctxt_info_v2;  	};  	struct iwl_prph_info *prph_info;  	struct iwl_prph_scratch *prph_scratch;  	void *iml; +	size_t iml_len;  	dma_addr_t ctxt_info_dma_addr;  	dma_addr_t prph_info_dma_addr;  	dma_addr_t prph_scratch_dma_addr; @@ -466,17 +448,17 @@ struct iwl_trans_pcie {  	u8 __iomem *hw_base;  	bool ucode_write_complete; -	bool sx_complete; +	enum { +		IWL_SX_INVALID = 0, +		IWL_SX_WAITING, +		IWL_SX_ERROR, +		IWL_SX_COMPLETE, +	} sx_state;  	wait_queue_head_t ucode_write_waitq;  	wait_queue_head_t sx_waitq; -	u8 n_no_reclaim_cmds; -	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];  	u16 num_rx_bufs; -	enum iwl_amsdu_size rx_buf_size; -	bool scd_set_active; -	bool pcie_dbg_dumped_once;  	u32 rx_page_order;  	u32 rx_buf_bytes;  	u32 supported_dma_mask; @@ -510,7 +492,6 @@ struct iwl_trans_pcie {  	void *base_rb_stts;  	dma_addr_t base_rb_stts_dma; -	bool fw_reset_handshake;  	enum iwl_pcie_fw_reset_state fw_reset_state;  	wait_queue_head_t fw_reset_waitq;  	enum iwl_pcie_imr_status imr_status; @@ -518,6 +499,13 @@ struct iwl_trans_pcie {  	char rf_name[32];  	struct iwl_pcie_txqs txqs; + +	s8 me_present; +	struct delayed_work me_recheck_wk; + +	struct iwl_dma_ptr invalid_tx_cmd; + +	wait_queue_head_t wait_command_queue;  };  static inline struct iwl_trans_pcie * @@ -550,18 +538,17 @@ iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)   * Convention: trans API functions: iwl_trans_pcie_XXX   *	Other functions: iwl_pcie_XXX   */ -struct iwl_trans -*iwl_trans_pcie_alloc(struct pci_dev *pdev, -		      const struct pci_device_id *ent, -		      const struct 
iwl_cfg_trans_params *cfg_trans);  void iwl_trans_pcie_free(struct iwl_trans *trans);  void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,  					   struct device *dev); -bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans); -#define _iwl_trans_pcie_grab_nic_access(trans)			\ +bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent); +#define _iwl_trans_pcie_grab_nic_access(trans, silent)		\  	__cond_lock(nic_access_nobh,				\ -		    likely(__iwl_trans_pcie_grab_nic_access(trans))) +		    likely(__iwl_trans_pcie_grab_nic_access(trans, silent))) + +void iwl_trans_pcie_check_product_reset_status(struct pci_dev *pdev); +void iwl_trans_pcie_check_product_reset_mode(struct pci_dev *pdev);  /*****************************************************  * RX @@ -620,7 +607,7 @@ struct iwl_tso_page_info {  				      IWL_TSO_PAGE_DATA_SIZE))  int iwl_pcie_tx_init(struct iwl_trans *trans); -void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr); +void iwl_pcie_tx_start(struct iwl_trans *trans);  int iwl_pcie_tx_stop(struct iwl_trans *trans);  void iwl_pcie_tx_free(struct iwl_trans *trans);  bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn, @@ -643,7 +630,8 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,  				    unsigned int len);  struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,  				   struct iwl_cmd_meta *cmd_meta, -				   u8 **hdr, unsigned int hdr_room); +				   u8 **hdr, unsigned int hdr_room, +				   unsigned int offset);  void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,  			     struct iwl_cmd_meta *cmd_meta); @@ -675,7 +663,7 @@ static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -	if (trans->trans_cfg->gen2) +	if (trans->mac_cfg->gen2)  		idx = iwl_txq_get_cmd_index(txq, idx);  	return (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * idx; @@ -710,22 +698,24 @@ static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)   * iwl_txq_inc_wrap - increment queue index, wrap back to beginning   * @trans: the transport (for configuration data)   * @index: current index + * Return: the queue index incremented, subject to wrapping   */  static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)  {  	return ++index & -		(trans->trans_cfg->base_params->max_tfd_queue_size - 1); +		(trans->mac_cfg->base->max_tfd_queue_size - 1);  }  /**   * iwl_txq_dec_wrap - decrement queue index, wrap back to end   * @trans: the transport (for configuration data)   * @index: current index + * Return: the queue index decremented, subject to wrapping   */  static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)  {  	return --index & -		(trans->trans_cfg->base_params->max_tfd_queue_size - 1); +		(trans->mac_cfg->base->max_tfd_queue_size - 1);  }  void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq); @@ -748,10 +738,12 @@ int iwl_txq_gen2_set_tb(struct iwl_trans *trans,  static inline void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans,  						struct iwl_tfh_tfd *tfd)  { +	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); +  	tfd->num_tbs = 0; -	iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma, -			    trans->invalid_tx_cmd.size); +	iwl_txq_gen2_set_tb(trans, tfd, trans_pcie->invalid_tx_cmd.dma, +			    trans_pcie->invalid_tx_cmd.size);  }  void 
iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, @@ -778,7 +770,7 @@ static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,  	struct iwl_tfd *tfd;  	struct iwl_tfd_tb *tb; -	if (trans->trans_cfg->gen2) { +	if (trans->mac_cfg->gen2) {  		struct iwl_tfh_tfd *tfh_tfd = _tfd;  		struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx]; @@ -936,11 +928,13 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)  	}  } -static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans) +static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans, +						   bool top_reset)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -	IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n"); +	IWL_DEBUG_ISR(trans, "Enabling %s interrupt only\n", +		      top_reset ? "RESET" : "ALIVE");  	if (!trans_pcie->msix_enabled) {  		/* @@ -950,11 +944,20 @@ static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)  		 * RX interrupt which will allow us to receive the ALIVE  		 * notification (which is Rx) and continue the flow.  		 */ -		trans_pcie->inta_mask =  CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX; +		if (top_reset) +			trans_pcie->inta_mask =  CSR_INT_BIT_RESET_DONE; +		else +			trans_pcie->inta_mask =  CSR_INT_BIT_ALIVE | +						 CSR_INT_BIT_FH_RX;  		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);  	} else { -		iwl_enable_hw_int_msk_msix(trans, -					   MSIX_HW_INT_CAUSES_REG_ALIVE); +		u32 val = top_reset ? MSIX_HW_INT_CAUSES_REG_RESET_DONE +				    : MSIX_HW_INT_CAUSES_REG_ALIVE; + +		iwl_enable_hw_int_msk_msix(trans, val); + +		if (top_reset) +			return;  		/*  		 * Leave all the FH causes enabled to get the ALIVE  		 * notification. @@ -1001,7 +1004,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)  					   MSIX_HW_INT_CAUSES_REG_RF_KILL);  	} -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) { +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {  		/*  		 * On 9000-series devices this bit isn't enabled by default, so  		 * when we power down the device we need set the bit to allow it @@ -1027,40 +1030,12 @@ static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)  		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);  } -static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, -						  u32 reg, u32 mask, u32 value) -{ -	u32 v; - -#ifdef CONFIG_IWLWIFI_DEBUG -	WARN_ON_ONCE(value & ~mask); -#endif - -	v = iwl_read32(trans, reg); -	v &= ~mask; -	v |= value; -	iwl_write32(trans, reg, v); -} - -static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans, -					      u32 reg, u32 mask) -{ -	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0); -} - -static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans, -					    u32 reg, u32 mask) -{ -	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask); -} -  static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)  {  	return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));  }  void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq); -void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);  #ifdef CONFIG_IWLWIFI_DEBUGFS  void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans); @@ -1072,8 +1047,8 @@ static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }  void iwl_pcie_rx_allocator_work(struct work_struct *data);  /* common trans ops for all generations transports */ -void iwl_trans_pcie_configure(struct iwl_trans *trans, -			      const struct 
iwl_trans_config *trans_cfg); +void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans); +int _iwl_trans_pcie_start_hw(struct iwl_trans *trans);  int iwl_trans_pcie_start_hw(struct iwl_trans *trans);  void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans);  void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val); @@ -1083,8 +1058,6 @@ u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg);  void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val);  int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,  			    void *buf, int dwords); -int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, -			     const void *buf, int dwords);  int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership);  struct iwl_trans_dump_data *  iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask, @@ -1101,15 +1074,24 @@ void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,  int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,  				 u32 *val);  bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans); -void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans); +void __releases(nic_access_nobh) +iwl_trans_pcie_release_nic_access(struct iwl_trans *trans); +void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power); +int iwl_pci_gen1_2_probe(struct pci_dev *pdev, +			 const struct pci_device_id *ent, +			 const struct iwl_mac_cfg *mac_cfg, +			 u8 __iomem *hw_base, u32 hw_rev);  /* transport gen 1 exported functions */ -void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr); +void iwl_trans_pcie_fw_alive(struct iwl_trans *trans);  int iwl_trans_pcie_start_fw(struct iwl_trans *trans, -			    const struct fw_img *fw, bool run_in_rfkill); +			    const struct iwl_fw *fw, +			    const struct fw_img *img, +			    bool run_in_rfkill);  void iwl_trans_pcie_stop_device(struct iwl_trans *trans);  /* common functions that are used by gen2 transport */ +void iwl_trans_pcie_gen2_op_mode_leave(struct iwl_trans *trans);  int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);  void iwl_pcie_apm_config(struct iwl_trans *trans);  int iwl_pcie_prepare_card_hw(struct iwl_trans *trans); @@ -1124,19 +1106,13 @@ int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,  void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);  void iwl_pcie_apply_destination(struct iwl_trans *trans); -/* common functions that are used by gen3 transport */ -void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power); -  /* transport gen 2 exported functions */  int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, -				 const struct fw_img *fw, bool run_in_rfkill); +				 const struct iwl_fw *fw, +				 const struct fw_img *img, +				 bool run_in_rfkill);  void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans); -int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans, -				  struct iwl_host_cmd *cmd);  void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans); -void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans); -void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans, -				  bool test, bool reset);  int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,  			       struct iwl_host_cmd *cmd);  int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, diff --git a/sys/contrib/dev/iwlwifi/pcie/rx.c b/sys/contrib/dev/iwlwifi/pcie/gen1_2/rx.c index 6c7fc89ed14e..340bc56ae842 100644 --- a/sys/contrib/dev/iwlwifi/pcie/rx.c +++ b/sys/contrib/dev/iwlwifi/pcie/gen1_2/rx.c @@ 
-12,7 +12,8 @@  #include "iwl-io.h"  #include "internal.h"  #include "iwl-op-mode.h" -#include "iwl-context-info-gen3.h" +#include "pcie/iwl-context-info-v2.h" +#include "fw/dbg.h"  /******************************************************************************   * @@ -143,12 +144,12 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)   */  int iwl_pcie_rx_stop(struct iwl_trans *trans)  { -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {  		/* TODO: remove this once fw does it */ -		iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0); -		return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3, +		iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_AX210, 0); +		return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_AX210,  					      RXF_DMA_IDLE, RXF_DMA_IDLE, 1000); -	} else if (trans->trans_cfg->mq_rx_supported) { +	} else if (trans->mac_cfg->mq_rx_supported) {  		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);  		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,  					   RXF_DMA_IDLE, RXF_DMA_IDLE, 1000); @@ -175,7 +176,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,  	 * 1. shadow registers aren't enabled  	 * 2. there is a chance that the NIC is asleep  	 */ -	if (!trans->trans_cfg->base_params->shadow_reg_enable && +	if (!trans->mac_cfg->base->shadow_reg_enable &&  	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {  		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); @@ -190,9 +191,9 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,  	}  	rxq->write_actual = round_down(rxq->write, 8); -	if (!trans->trans_cfg->mq_rx_supported) +	if (!trans->mac_cfg->mq_rx_supported)  		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); -	else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) +	else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)  		iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual |  			    HBUS_TARG_WRPTR_RX_Q(rxq->id));  	else @@ -205,7 +206,7 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	int i; -	for (i = 0; i < trans->num_rx_queues; i++) { +	for (i = 0; i < trans->info.num_rxqs; i++) {  		struct iwl_rxq *rxq = &trans_pcie->rxq[i];  		if (!rxq->need_update) @@ -221,7 +222,7 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans,  				struct iwl_rxq *rxq,  				struct iwl_rx_mem_buffer *rxb)  { -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {  		struct iwl_rx_transfer_desc *bd = rxq->bd;  		BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64)); @@ -234,12 +235,8 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans,  		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);  	} -#if defined(__linux__)  	IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n", -#elif defined(__FreeBSD__) -	IWL_DEBUG_PCI_RW(trans, "Assigned virtual RB ID %u to queue %d index %d\n",  		     (u32)rxb->vid, rxq->id, rxq->write); -#endif  }  /* @@ -352,7 +349,7 @@ static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,  static  void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)  { -	if (trans->trans_cfg->mq_rx_supported) +	if (trans->mac_cfg->mq_rx_supported)  		iwl_pcie_rxmq_restock(trans, rxq);  	else  		iwl_pcie_rxsq_restock(trans, rxq); @@ -366,8 +363,8 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,  					   u32 *offset, 
gfp_t priority)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -	unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);  	unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order; +	unsigned int rbsize = trans_pcie->rx_buf_bytes;  	struct page *page;  	gfp_t gfp_mask = priority; @@ -661,19 +658,19 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data)  static int iwl_pcie_free_bd_size(struct iwl_trans *trans)  { -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)  		return sizeof(struct iwl_rx_transfer_desc); -	return trans->trans_cfg->mq_rx_supported ? +	return trans->mac_cfg->mq_rx_supported ?  			sizeof(__le64) : sizeof(__le32);  }  static int iwl_pcie_used_bd_size(struct iwl_trans *trans)  { -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)  		return sizeof(struct iwl_rx_completion_desc_bz); -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)  		return sizeof(struct iwl_rx_completion_desc);  	return sizeof(__le32); @@ -705,7 +702,7 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,  static size_t iwl_pcie_rb_stts_size(struct iwl_trans *trans)  { -	bool use_rx_td = (trans->trans_cfg->device_family >= +	bool use_rx_td = (trans->mac_cfg->device_family >=  			  IWL_DEVICE_FAMILY_AX210);  	if (use_rx_td) @@ -724,8 +721,8 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,  	int free_size;  	spin_lock_init(&rxq->lock); -	if (trans->trans_cfg->mq_rx_supported) -		rxq->queue_size = trans->cfg->num_rbds; +	if (trans->mac_cfg->mq_rx_supported) +		rxq->queue_size = iwl_trans_get_num_rbds(trans);  	else  		rxq->queue_size = RX_QUEUE_SIZE; @@ -740,7 +737,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,  	if (!rxq->bd)  		goto err; -	if (trans->trans_cfg->mq_rx_supported) { +	if (trans->mac_cfg->mq_rx_supported) {  		rxq->used_bd = dma_alloc_coherent(dev,  						  iwl_pcie_used_bd_size(trans) *  							rxq->queue_size, @@ -757,7 +754,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,  	return 0;  err: -	for (i = 0; i < trans->num_rx_queues; i++) { +	for (i = 0; i < trans->info.num_rxqs; i++) {  		struct iwl_rxq *rxq = &trans_pcie->rxq[i];  		iwl_pcie_free_rxq_dma(trans, rxq); @@ -776,7 +773,7 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans)  	if (WARN_ON(trans_pcie->rxq))  		return -EINVAL; -	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq), +	trans_pcie->rxq = kcalloc(trans->info.num_rxqs, sizeof(struct iwl_rxq),  				  GFP_KERNEL);  	trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),  				      sizeof(trans_pcie->rx_pool[0]), @@ -799,7 +796,7 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans)  	 */  	trans_pcie->base_rb_stts =  			dma_alloc_coherent(trans->dev, -					   rb_stts_size * trans->num_rx_queues, +					   rb_stts_size * trans->info.num_rxqs,  					   &trans_pcie->base_rb_stts_dma,  					   GFP_KERNEL);  	if (!trans_pcie->base_rb_stts) { @@ -807,7 +804,7 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans)  		goto err;  	} -	for (i = 0; i < trans->num_rx_queues; i++) { +	for (i = 0; i < trans->info.num_rxqs; i++) {  		struct iwl_rxq *rxq = &trans_pcie->rxq[i];  		rxq->id = i; @@ -820,7 +817,7 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans)  err:  	if (trans_pcie->base_rb_stts) {  		
dma_free_coherent(trans->dev, -				  rb_stts_size * trans->num_rx_queues, +				  rb_stts_size * trans->info.num_rxqs,  				  trans_pcie->base_rb_stts,  				  trans_pcie->base_rb_stts_dma);  		trans_pcie->base_rb_stts = NULL; @@ -838,11 +835,10 @@ err:  static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)  { -	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	u32 rb_size;  	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ -	switch (trans_pcie->rx_buf_size) { +	switch (trans->conf.rx_buf_size) {  	case IWL_AMSDU_4K:  		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;  		break; @@ -910,7 +906,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)  	u32 rb_size, enabled = 0;  	int i; -	switch (trans_pcie->rx_buf_size) { +	switch (trans->conf.rx_buf_size) {  	case IWL_AMSDU_2K:  		rb_size = RFH_RXF_DMA_RB_SIZE_2K;  		break; @@ -936,7 +932,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)  	/* disable free and used rx queue operation */  	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0); -	for (i = 0; i < trans->num_rx_queues; i++) { +	for (i = 0; i < trans->info.num_rxqs; i++) {  		/* Tell device where to find RBD free table in DRAM */  		iwl_write_prph64_no_grab(trans,  					 RFH_Q_FRBDCB_BA_LSB(i), @@ -980,7 +976,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)  			       RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |  			       RFH_GEN_CFG_SERVICE_DMA_SNOOP |  			       RFH_GEN_CFG_VAL(RB_CHUNK_SIZE, -					       trans->trans_cfg->integrated ? +					       trans->mac_cfg->integrated ?  					       RFH_GEN_CFG_RB_CHUNK_SIZE_64 :  					       RFH_GEN_CFG_RB_CHUNK_SIZE_128));  	/* Enable the relevant rx queues */ @@ -1076,7 +1072,7 @@ void iwl_pcie_rx_napi_sync(struct iwl_trans *trans)  	if (unlikely(!trans_pcie->rxq))  		return; -	for (i = 0; i < trans->num_rx_queues; i++) { +	for (i = 0; i < trans->info.num_rxqs; i++) {  		struct iwl_rxq *rxq = &trans_pcie->rxq[i];  		if (rxq && rxq->napi.poll) @@ -1113,7 +1109,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)  	for (i = 0; i < RX_QUEUE_SIZE; i++)  		def_rxq->queue[i] = NULL; -	for (i = 0; i < trans->num_rx_queues; i++) { +	for (i = 0; i < trans->info.num_rxqs; i++) {  		struct iwl_rxq *rxq = &trans_pcie->rxq[i];  		spin_lock_bh(&rxq->lock); @@ -1126,7 +1122,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)  		rxq->write = 0;  		rxq->write_actual = 0;  		memset(rxq->rb_stts, 0, -		       (trans->trans_cfg->device_family >= +		       (trans->mac_cfg->device_family >=  			IWL_DEVICE_FAMILY_AX210) ?  		       sizeof(__le16) : sizeof(struct iwl_rb_status)); @@ -1148,9 +1144,9 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)  	}  	/* move the pool to the default queue and allocator ownerships */ -	queue_size = trans->trans_cfg->mq_rx_supported ? +	queue_size = trans->mac_cfg->mq_rx_supported ?  			
trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE; -	allocator_pool_size = trans->num_rx_queues * +	allocator_pool_size = trans->info.num_rxqs *  		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);  	num_alloc = queue_size + allocator_pool_size; @@ -1179,7 +1175,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)  	if (ret)  		return ret; -	if (trans->trans_cfg->mq_rx_supported) +	if (trans->mac_cfg->mq_rx_supported)  		iwl_pcie_rx_mq_hw_init(trans);  	else  		iwl_pcie_rx_hw_init(trans, trans_pcie->rxq); @@ -1227,14 +1223,14 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)  	if (trans_pcie->base_rb_stts) {  		dma_free_coherent(trans->dev, -				  rb_stts_size * trans->num_rx_queues, +				  rb_stts_size * trans->info.num_rxqs,  				  trans_pcie->base_rb_stts,  				  trans_pcie->base_rb_stts_dma);  		trans_pcie->base_rb_stts = NULL;  		trans_pcie->base_rb_stts_dma = 0;  	} -	for (i = 0; i < trans->num_rx_queues; i++) { +	for (i = 0; i < trans->info.num_rxqs; i++) {  		struct iwl_rxq *rxq = &trans_pcie->rxq[i];  		iwl_pcie_free_rxq_dma(trans, rxq); @@ -1305,7 +1301,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,  				int i)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -	struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; +	struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];  	bool page_stolen = false;  	int max_len = trans_pcie->rx_buf_bytes;  	u32 offset = 0; @@ -1372,8 +1368,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,  		if (reclaim && !pkt->hdr.group_id) {  			int i; -			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { -				if (trans_pcie->no_reclaim_cmds[i] == +			for (i = 0; i < trans->conf.n_no_reclaim_cmds; i++) { +				if (trans->conf.no_reclaim_cmds[i] ==  							pkt->hdr.cmd) {  					reclaim = false;  					break; @@ -1412,7 +1408,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,  		}  		page_stolen |= rxcb._page_stolen; -		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) +		if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)  			break;  	} @@ -1458,18 +1454,18 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,  	BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);  	BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4); -	if (!trans->trans_cfg->mq_rx_supported) { +	if (!trans->mac_cfg->mq_rx_supported) {  		rxb = rxq->queue[i];  		rxq->queue[i] = NULL;  		return rxb;  	} -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {  		struct iwl_rx_completion_desc_bz *cd = rxq->used_bd;  		vid = le16_to_cpu(cd[i].rbid);  		*join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED; -	} else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { +	} else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {  		struct iwl_rx_completion_desc *cd = rxq->used_bd;  		vid = le16_to_cpu(cd[i].rbid); @@ -1652,7 +1648,7 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)  	trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0); -	if (WARN_ON(entry->entry >= trans->num_rx_queues)) +	if (WARN_ON(entry->entry >= trans->info.num_rxqs))  		return IRQ_NONE;  	if (!trans_pcie->rxq) { @@ -1687,29 +1683,38 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)  	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */  	if (trans->cfg->internal_wimax_coex && -	    !trans->cfg->apmg_not_supported && +	    
!trans->mac_cfg->base->apmg_not_supported &&  	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &  			     APMS_CLK_VAL_MRB_FUNC_MODE) ||  	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &  			    APMG_PS_CTRL_VAL_RESET_REQ))) {  		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);  		iwl_op_mode_wimax_active(trans->op_mode); -		wake_up(&trans->wait_command_queue); +		wake_up(&trans_pcie->wait_command_queue);  		return;  	} -	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { +	for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) {  		if (!trans_pcie->txqs.txq[i])  			continue; -		del_timer(&trans_pcie->txqs.txq[i]->stuck_timer); +		timer_delete(&trans_pcie->txqs.txq[i]->stuck_timer); +	} + +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC) { +		u32 val = iwl_read32(trans, CSR_IPC_STATE); + +		if (val & CSR_IPC_STATE_TOP_RESET_REQ) { +			IWL_ERR(trans, "FW requested TOP reset for FSEQ\n"); +			trans->do_top_reset = 1; +		}  	}  	/* The STATUS_FW_ERROR bit is set in this function. This must happen  	 * before we wake up the command caller, to ensure a proper cleanup. */ -	iwl_trans_fw_error(trans, false); +	iwl_trans_fw_error(trans, IWL_ERR_TYPE_IRQ);  	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); -	wake_up(&trans->wait_command_queue); +	wake_up(&trans_pcie->wait_command_queue);  }  static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans) @@ -1824,7 +1829,7 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq)  				       &trans->status))  			IWL_DEBUG_RF_KILL(trans,  					  "Rfkill while SYNC HCMD in flight\n"); -		wake_up(&trans->wait_command_queue); +		wake_up(&trans_pcie->wait_command_queue);  	} else {  		clear_bit(STATUS_RFKILL_HW, &trans->status);  		if (trans_pcie->opmode_down) @@ -1832,6 +1837,59 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq)  	}  } +static void iwl_trans_pcie_handle_reset_interrupt(struct iwl_trans *trans) +{ +	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); +	u32 state; + +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC) { +		u32 val = iwl_read32(trans, CSR_IPC_STATE); + +		state = u32_get_bits(val, CSR_IPC_STATE_RESET); +		IWL_DEBUG_ISR(trans, "IPC state = 0x%x/%d\n", val, state); +	} else { +		state = CSR_IPC_STATE_RESET_SW_READY; +	} + +	switch (state) { +	case CSR_IPC_STATE_RESET_SW_READY: +		if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) { +			IWL_DEBUG_ISR(trans, "Reset flow completed\n"); +			trans_pcie->fw_reset_state = FW_RESET_OK; +			wake_up(&trans_pcie->fw_reset_waitq); +			break; +		} +		fallthrough; +	case CSR_IPC_STATE_RESET_TOP_READY: +		if (trans_pcie->fw_reset_state == FW_RESET_TOP_REQUESTED) { +			IWL_DEBUG_ISR(trans, "TOP Reset continues\n"); +			trans_pcie->fw_reset_state = FW_RESET_OK; +			wake_up(&trans_pcie->fw_reset_waitq); +			break; +		} +		fallthrough; +	case CSR_IPC_STATE_RESET_NONE: +		IWL_FW_CHECK_FAILED(trans, +				    "Invalid reset interrupt (state=%d)!\n", +				    state); +		break; +	case CSR_IPC_STATE_RESET_TOP_FOLLOWER: +		if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) { +			/* if we were in reset, wake that up */ +			IWL_INFO(trans, +				 "TOP reset from BT while doing reset\n"); +			trans_pcie->fw_reset_state = FW_RESET_OK; +			wake_up(&trans_pcie->fw_reset_waitq); +		} else { +			IWL_INFO(trans, "TOP reset from BT\n"); +			trans->state = IWL_TRANS_NO_FW; +			iwl_trans_schedule_reset(trans, +						 IWL_ERR_TYPE_TOP_RESET_BY_BT); +		} +		break; +	} +} +  irqreturn_t 
iwl_pcie_irq_handler(int irq, void *dev_id)  {  	struct iwl_trans *trans = dev_id; @@ -1944,7 +2002,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)  	if (inta & CSR_INT_BIT_ALIVE) {  		IWL_DEBUG_ISR(trans, "Alive interrupt\n");  		isr_stats->alive++; -		if (trans->trans_cfg->gen2) { +		if (trans->mac_cfg->gen2) {  			/*  			 * We can restock, since firmware configured  			 * the RFH @@ -1955,6 +2013,11 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)  		handled |= CSR_INT_BIT_ALIVE;  	} +	if (inta & CSR_INT_BIT_RESET_DONE) { +		iwl_trans_pcie_handle_reset_interrupt(trans); +		handled |= CSR_INT_BIT_RESET_DONE; +	} +  	/* Safely ignore these bits for debug checks below */  	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); @@ -1976,7 +2039,12 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)  		IWL_ERR(trans, "Microcode SW error detected. "  			" Restarting 0x%X.\n", inta);  		isr_stats->sw++; -		iwl_pcie_irq_handle_error(trans); +		if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) { +			trans_pcie->fw_reset_state = FW_RESET_ERROR; +			wake_up(&trans_pcie->fw_reset_waitq); +		} else { +			iwl_pcie_irq_handle_error(trans); +		}  		handled |= CSR_INT_BIT_SW_ERR;  	} @@ -2082,7 +2150,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)  			iwl_enable_rfkill_int(trans);  		/* Re-enable the ALIVE / Rx interrupt if it occurred */  		else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX)) -			iwl_enable_fw_load_int_ctx_info(trans); +			iwl_enable_fw_load_int_ctx_info(trans, false);  		spin_unlock_bh(&trans_pcie->irq_lock);  	} @@ -2299,7 +2367,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)  		}  	} -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)  		sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;  	else  		sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR; @@ -2307,7 +2375,13 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)  	if (inta_hw & MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR) {  		IWL_ERR(trans, "TOP Fatal error detected, inta_hw=0x%x.\n",  			inta_hw); -		/* TODO: PLDR flow required here for >= Bz */ +		if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { +			trans->request_top_reset = 1; +			iwl_op_mode_nic_error(trans->op_mode, +					      IWL_ERR_TYPE_TOP_FATAL_ERROR); +			iwl_trans_schedule_reset(trans, +						 IWL_ERR_TYPE_TOP_FATAL_ERROR); +		}  	}  	/* Error detected by uCode */ @@ -2326,6 +2400,11 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)  		} else {  			iwl_pcie_irq_handle_error(trans);  		} + +		if (trans_pcie->sx_state == IWL_SX_WAITING) { +			trans_pcie->sx_state = IWL_SX_ERROR; +			wake_up(&trans_pcie->sx_waitq); +		}  	}  	/* After checking FH register check HW register */ @@ -2348,7 +2427,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)  	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {  		IWL_DEBUG_ISR(trans, "Alive interrupt\n");  		isr_stats->alive++; -		if (trans->trans_cfg->gen2) { +		if (trans->mac_cfg->gen2) {  			/* We can restock, since firmware configured the RFH */  			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);  		} @@ -2362,13 +2441,20 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)  	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) {  		u32 sleep_notif =  			le32_to_cpu(trans_pcie->prph_info->sleep_notif); +  		if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||  		    sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) {  			
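/*
 * Illustrative sketch, not part of the patch: throughout these hunks the
 * boolean sx_complete becomes a tri-state sx_state (WAITING / COMPLETE /
 * ERROR), letting the D3 handshake distinguish a firmware error from a
 * plain timeout. Sketch of the waiter-side contract, assuming the IWL_SX_*
 * values and fields from internal.h; simplified (the real code also
 * re-arms sx_state to IWL_SX_INVALID afterwards):
 */
static int model_d3_wait(struct iwl_trans_pcie *trans_pcie)
{
	long ret = wait_event_timeout(trans_pcie->sx_waitq,
				      trans_pcie->sx_state != IWL_SX_WAITING,
				      2 * HZ);

	if (!ret)
		return -ETIMEDOUT;	/* device never acknowledged Sx */
	if (trans_pcie->sx_state == IWL_SX_ERROR)
		return -EIO;		/* FW error raced the Sx flow */
	return 0;			/* IWL_SX_COMPLETE */
}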
IWL_DEBUG_ISR(trans,  				      "Sx interrupt: sleep notification = 0x%x\n",  				      sleep_notif); -			trans_pcie->sx_complete = true; -			wake_up(&trans_pcie->sx_waitq); +			if (trans_pcie->sx_state == IWL_SX_WAITING) { +				trans_pcie->sx_state = IWL_SX_COMPLETE; +				wake_up(&trans_pcie->sx_waitq); +			} else { +				IWL_ERR(trans, +					"unexpected Sx interrupt (0x%x)\n", +					sleep_notif); +			}  		} else {  			/* uCode wakes up after power-down sleep */  			IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); @@ -2398,11 +2484,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)  		iwl_pcie_irq_handle_error(trans);  	} -	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE) { -		IWL_DEBUG_ISR(trans, "Reset flow completed\n"); -		trans_pcie->fw_reset_state = FW_RESET_OK; -		wake_up(&trans_pcie->fw_reset_waitq); -	} +	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE) +		iwl_trans_pcie_handle_reset_interrupt(trans);  	if (!polling)  		iwl_pcie_clear_irq(trans, entry->entry); diff --git a/sys/contrib/dev/iwlwifi/pcie/trans-gen2.c b/sys/contrib/dev/iwlwifi/pcie/gen1_2/trans-gen2.c index 96127accc0a0..b27e58c1a00b 100644 --- a/sys/contrib/dev/iwlwifi/pcie/trans-gen2.c +++ b/sys/contrib/dev/iwlwifi/pcie/gen1_2/trans-gen2.c @@ -1,15 +1,15 @@  // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause  /*   * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation   */  #if defined(__FreeBSD__)  #include <linux/delay.h>  #endif  #include "iwl-trans.h"  #include "iwl-prph.h" -#include "iwl-context-info.h" -#include "iwl-context-info-gen3.h" +#include "pcie/iwl-context-info.h" +#include "pcie/iwl-context-info-v2.h"  #include "internal.h"  #include "fw/dbg.h" @@ -46,7 +46,7 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)  	 * wake device's PCI Express link L1a -> L0s  	 */  	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, -		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); +		    CSR_HW_IF_CONFIG_REG_HAP_WAKE);  	iwl_pcie_apm_config(trans); @@ -71,8 +71,8 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)  		iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,  			    CSR_RESET_LINK_PWR_MGMT_DISABLED);  		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, -			    CSR_HW_IF_CONFIG_REG_PREPARE | -			    CSR_HW_IF_CONFIG_REG_ENABLE_PME); +			    CSR_HW_IF_CONFIG_REG_WAKE_ME | +			    CSR_HW_IF_CONFIG_REG_WAKE_ME_PCIE_OWNER_EN);  		mdelay(1);  		iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,  			      CSR_RESET_LINK_PWR_MGMT_DISABLED); @@ -84,13 +84,13 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)  	/* Stop device's DMA activity */  	iwl_pcie_apm_stop_master(trans); -	iwl_trans_sw_reset(trans, false); +	iwl_trans_pcie_sw_reset(trans, false);  	/*  	 * Clear "initialization complete" bit to move adapter from  	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.  	 
*/ -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)  		iwl_clear_bit(trans, CSR_GP_CNTRL,  			      CSR_GP_CNTRL_REG_FLAG_MAC_INIT);  	else @@ -98,17 +98,17 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)  			      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);  } -static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans) +void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	int ret;  	trans_pcie->fw_reset_state = FW_RESET_REQUESTED; -	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) +	if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)  		iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,  				    UREG_NIC_SET_NMI_DRIVER_RESET_HANDSHAKE); -	else if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) +	else if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210)  		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,  				    UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE);  	else @@ -120,20 +120,37 @@ static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)  				 trans_pcie->fw_reset_state != FW_RESET_REQUESTED,  				 FW_RESET_TIMEOUT);  	if (!ret || trans_pcie->fw_reset_state == FW_RESET_ERROR) { -		u32 inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD); +		bool reset_done; +		u32 inta_hw; + +		if (trans_pcie->msix_enabled) { +			inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD); +			reset_done = +				inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE; +		} else { +			inta_hw = iwl_read32(trans, CSR_INT); +			reset_done = inta_hw & CSR_INT_BIT_RESET_DONE; +		}  		IWL_ERR(trans, -			"timeout waiting for FW reset ACK (inta_hw=0x%x)\n", -			inta_hw); - -		if (!(inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE)) -			iwl_trans_fw_error(trans, true); +			"timeout waiting for FW reset ACK (inta_hw=0x%x, reset_done %d)\n", +			inta_hw, reset_done); + +		if (!reset_done) { +			struct iwl_fw_error_dump_mode mode = { +				.type = IWL_ERR_TYPE_RESET_HS_TIMEOUT, +				.context = IWL_ERR_CONTEXT_FROM_OPMODE, +			}; +			iwl_op_mode_nic_error(trans->op_mode, +					      IWL_ERR_TYPE_RESET_HS_TIMEOUT); +			iwl_op_mode_dump_error(trans->op_mode, &mode); +		}  	}  	trans_pcie->fw_reset_state = FW_RESET_IDLE;  } -void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) +static void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -142,9 +159,15 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)  	if (trans_pcie->is_down)  		return; -	if (trans->state >= IWL_TRANS_FW_STARTED) -		if (trans_pcie->fw_reset_handshake) -			iwl_trans_pcie_fw_reset_handshake(trans); +	if (trans->state >= IWL_TRANS_FW_STARTED && +	    trans->conf.fw_reset_handshake) { +		/* +		 * Reset handshake can dump firmware on timeout, but that +		 * should assume that the firmware is already dead. 
+		 */ +		trans->state = IWL_TRANS_NO_FW; +		iwl_trans_pcie_fw_reset_handshake(trans); +	}  	trans_pcie->is_down = true; @@ -171,8 +194,8 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)  	}  	iwl_pcie_ctxt_info_free_paging(trans); -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) -		iwl_pcie_ctxt_info_gen3_free(trans, false); +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) +		iwl_pcie_ctxt_info_v2_free(trans, false);  	else  		iwl_pcie_ctxt_info_free(trans); @@ -180,7 +203,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)  	iwl_pcie_gen2_apm_stop(trans, false);  	/* re-take ownership to prevent other users from stealing the device */ -	iwl_trans_sw_reset(trans, true); +	iwl_trans_pcie_sw_reset(trans, true);  	/*  	 * Upon stop, the IVAR table gets erased, so msi-x won't @@ -233,7 +256,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	int queue_size = max_t(u32, IWL_CMD_QUEUE_SIZE, -			       trans->cfg->min_txq_size); +			       trans->mac_cfg->base->min_txq_size);  	int ret;  	/* TODO: most of the logic can be removed in A0 - but not in Z0 */ @@ -250,7 +273,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)  		return -ENOMEM;  	/* Allocate or reset and init all Tx and Command queues */ -	if (iwl_txq_gen2_init(trans, trans_pcie->txqs.cmd.q_id, queue_size)) +	if (iwl_txq_gen2_init(trans, trans->conf.cmd_queue, queue_size))  		return -ENOMEM;  	/* enable shadow regs in HW */ @@ -271,7 +294,7 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans)  	if (buf[0])  		return; -	switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) { +	switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) {  	case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF):  		pos = scnprintf(buf, buflen, "JF");  		break; @@ -290,15 +313,12 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans)  	case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB):  		pos = scnprintf(buf, buflen, "HRCDB");  		break; -	case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_MS): -		pos = scnprintf(buf, buflen, "MS"); -		break;  	case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_FM):  		pos = scnprintf(buf, buflen, "FM");  		break;  	case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_WP):  		if (SILICON_Z_STEP == -		    CSR_HW_RFID_STEP(trans->hw_rf_id)) +		    CSR_HW_RFID_STEP(trans->info.hw_rf_id))  			pos = scnprintf(buf, buflen, "WHTC");  		else  			pos = scnprintf(buf, buflen, "WH"); @@ -307,7 +327,7 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans)  		return;  	} -	switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) { +	switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) {  	case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR):  	case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1):  	case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB): @@ -330,7 +350,7 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans)  	}  	pos += scnprintf(buf + pos, buflen - pos, ", rfid=0x%x", -			 trans->hw_rf_id); +			 trans->info.hw_rf_id);  	IWL_INFO(trans, "Detected RF %s\n", buf); @@ -357,8 +377,8 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans)  	/* now that we got alive we can free the fw image & the context info.  	 
 * paging memory cannot be freed, since FW will still use it
 	 */
-	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
-		iwl_pcie_ctxt_info_gen3_free(trans, true);
+	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+		iwl_pcie_ctxt_info_v2_free(trans, true);
 	else
 		iwl_pcie_ctxt_info_free(trans);
@@ -372,6 +392,11 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans)
 	iwl_pcie_get_rf_name(trans);
 	mutex_unlock(&trans_pcie->mutex);
+
+	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+		trans->step_urm = !!(iwl_read_umac_prph(trans,
+							CNVI_PMU_STEP_FLOW) &
+					CNVI_PMU_STEP_FLOW_FORCE_URM);
 }
 static bool iwl_pcie_set_ltr(struct iwl_trans *trans)
@@ -391,21 +416,21 @@ static bool iwl_pcie_set_ltr(struct iwl_trans *trans)
 	 * initialize the LTR to ~250 usec (see ltr_val above).
 	 * The firmware initializes this again later (to a smaller value).
 	 */
-	if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
-	     trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
-	    !trans->trans_cfg->integrated) {
+	if ((trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
+	     trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
+	    !trans->mac_cfg->integrated) {
 		iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
 		return true;
 	}
-	if (trans->trans_cfg->integrated &&
-	    trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
+	if (trans->mac_cfg->integrated &&
+	    trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
 		iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
 		iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
 		return true;
 	}
-	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
+	if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
 		/* First clear the interrupt, just in case */
 		iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD,
 			    MSIX_HW_INT_CAUSES_REG_IML);
@@ -462,16 +487,22 @@ static void iwl_pcie_spin_for_iml(struct iwl_trans *trans)
 }
 int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
-				 const struct fw_img *fw, bool run_in_rfkill)
+				 const struct iwl_fw *fw,
+				 const struct fw_img *img,
+				 bool run_in_rfkill)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	bool hw_rfkill, keep_ram_busy;
+	bool top_reset_done = false;
 	int ret;
+	mutex_lock(&trans_pcie->mutex);
+again:
 	/* This may fail if AMT took ownership of the device */
 	if (iwl_pcie_prepare_card_hw(trans)) {
 		IWL_WARN(trans, "Exit HW not ready\n");
-		return -EIO;
+		ret = -EIO;
+		goto out;
 	}
 	iwl_enable_rfkill_int(trans);
@@ -488,8 +519,6 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
 	/* Make sure it finished running */
 	iwl_pcie_synchronize_irqs(trans);
-	mutex_lock(&trans_pcie->mutex);
-
 	/* If platform's RF_KILL switch is NOT set to KILL */
 	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
 	if (hw_rfkill && !run_in_rfkill) {
@@ -519,20 +548,39 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
 		goto out;
 	}
-	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
-		ret = iwl_pcie_ctxt_info_gen3_init(trans, fw);
-	else
-		ret = iwl_pcie_ctxt_info_init(trans, fw);
-	if (ret)
+	if (WARN_ON(trans->do_top_reset &&
+		    trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC)) {
+		ret = -EINVAL;
 		goto out;
+	}
+
+	/* we need to wait later - set state */
+	if (trans->do_top_reset)
+		trans_pcie->fw_reset_state = FW_RESET_TOP_REQUESTED;
+
+	if
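/*
 * Illustrative sketch, not part of the patch: the one-shot
 * iwl_pcie_ctxt_info_gen3_init() is split into _alloc() + _kick() so the
 * TOP reset retry later in this function can re-kick the already-built
 * context info instead of rebuilding it. Self-contained model of the
 * resulting control flow; the model_* helpers are hypothetical stand-ins:
 */
static int model_ctxt_info_alloc(void);
static void model_ctxt_info_kick(void);
static bool model_top_reset_ok(void);	/* bounded wait, ~HZ/4 */
static int model_restart_hw(void);	/* re-init HW, clear TOP_RESET flag */

static int model_start_fw(bool want_top_reset)
{
	bool top_reset_done = false;
	int ret;

again:
	if (!top_reset_done) {
		ret = model_ctxt_info_alloc();	/* build context info once */
		if (ret)
			return ret;
	}
	model_ctxt_info_kick();			/* (re)hand it to the device */

	if (want_top_reset) {
		if (!model_top_reset_ok())
			return -EIO;		/* timed out or FW error */
		ret = model_restart_hw();
		if (ret)
			return ret;
		want_top_reset = false;
		top_reset_done = true;
		goto again;			/* now load the FW for real */
	}
	return 0;
}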
(trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { +		if (!top_reset_done) { +			ret = iwl_pcie_ctxt_info_v2_alloc(trans, fw, img); +			if (ret) +				goto out; +		} + +		iwl_pcie_ctxt_info_v2_kick(trans); +	} else { +		ret = iwl_pcie_ctxt_info_init(trans, img); +		if (ret) +			goto out; +	}  	keep_ram_busy = !iwl_pcie_set_ltr(trans); -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { +		IWL_DEBUG_POWER(trans, "function scratch register value is 0x%08x\n", +				iwl_read32(trans, CSR_FUNC_SCRATCH));  		iwl_write32(trans, CSR_FUNC_SCRATCH, CSR_FUNC_SCRATCH_INIT_VALUE);  		iwl_set_bit(trans, CSR_GP_CNTRL,  			    CSR_GP_CNTRL_REG_FLAG_ROM_START); -	} else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { +	} else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {  		iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);  	} else {  		iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1); @@ -541,6 +589,43 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,  	if (keep_ram_busy)  		iwl_pcie_spin_for_iml(trans); +	if (trans->do_top_reset) { +		trans->do_top_reset = 0; + +#define FW_TOP_RESET_TIMEOUT	(HZ / 4) +		ret = wait_event_timeout(trans_pcie->fw_reset_waitq, +					 trans_pcie->fw_reset_state != FW_RESET_TOP_REQUESTED, +					 FW_TOP_RESET_TIMEOUT); + +		if (trans_pcie->fw_reset_state != FW_RESET_OK) { +			if (trans_pcie->fw_reset_state != FW_RESET_TOP_REQUESTED) +				IWL_ERR(trans, +					"TOP reset interrupted by error (state %d)!\n", +					trans_pcie->fw_reset_state); +			else +				IWL_ERR(trans, "TOP reset timed out!\n"); +			iwl_op_mode_nic_error(trans->op_mode, +					      IWL_ERR_TYPE_TOP_RESET_FAILED); +			iwl_trans_schedule_reset(trans, +						 IWL_ERR_TYPE_TOP_RESET_FAILED); +			ret = -EIO; +			goto out; +		} + +		msleep(10); +		IWL_INFO(trans, "TOP reset successful, reinit now\n"); +		/* now load the firmware again properly */ +		ret = _iwl_trans_pcie_start_hw(trans); +		if (ret) { +			IWL_ERR(trans, "failed to start HW after TOP reset\n"); +			goto out; +		} +		trans_pcie->prph_scratch->ctrl_cfg.control.control_flags &= +			~cpu_to_le32(IWL_PRPH_SCRATCH_TOP_RESET); +		top_reset_done = true; +		goto again; +	} +  	/* re-check RF-Kill state since we may have missed the interrupt */  	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);  	if (hw_rfkill && !run_in_rfkill) @@ -550,3 +635,23 @@ out:  	mutex_unlock(&trans_pcie->mutex);  	return ret;  } + +void iwl_trans_pcie_gen2_op_mode_leave(struct iwl_trans *trans) +{ +	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + +	mutex_lock(&trans_pcie->mutex); + +	/* disable interrupts - don't enable HW RF kill interrupt */ +	iwl_disable_interrupts(trans); + +	iwl_pcie_gen2_apm_stop(trans, true); + +	iwl_disable_interrupts(trans); + +	iwl_pcie_disable_ict(trans); + +	mutex_unlock(&trans_pcie->mutex); + +	iwl_pcie_synchronize_irqs(trans); +} diff --git a/sys/contrib/dev/iwlwifi/pcie/trans.c b/sys/contrib/dev/iwlwifi/pcie/gen1_2/trans.c index 30be42af1ae1..340a3dd7055c 100644 --- a/sys/contrib/dev/iwlwifi/pcie/trans.c +++ b/sys/contrib/dev/iwlwifi/pcie/gen1_2/trans.c @@ -28,137 +28,22 @@  #include "fw/error-dump.h"  #include "fw/dbg.h"  #include "fw/api/tx.h" +#include "fw/acpi.h" +#include "fw/api/tx.h"  #include "mei/iwl-mei.h"  #include "internal.h"  #include "iwl-fh.h" -#include "iwl-context-info-gen3.h" +#include "pcie/iwl-context-info-v2.h" +#include "pcie/utils.h"  /* extended range in FW SRAM */  #define 
IWL_FW_MEM_EXTENDED_START	0x40000  #define IWL_FW_MEM_EXTENDED_END		0x57FFF -void iwl_trans_pcie_dump_regs(struct iwl_trans *trans) -{ -#define PCI_DUMP_SIZE		352 -#define PCI_MEM_DUMP_SIZE	64 -#define PCI_PARENT_DUMP_SIZE	524 -#define PREFIX_LEN		32 -	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -	struct pci_dev *pdev = trans_pcie->pci_dev; -	u32 i, pos, alloc_size, *ptr, *buf; -	char *prefix; - -	if (trans_pcie->pcie_dbg_dumped_once) -		return; - -	/* Should be a multiple of 4 */ -	BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3); -	BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3); -	BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3); - -	/* Alloc a max size buffer */ -	alloc_size = PCI_ERR_ROOT_ERR_SRC +  4 + PREFIX_LEN; -	alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN); -	alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN); -	alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN); - -	buf = kmalloc(alloc_size, GFP_ATOMIC); -	if (!buf) -		return; -	prefix = (char *)buf + alloc_size - PREFIX_LEN; - -	IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n"); - -	/* Print wifi device registers */ -	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev)); -	IWL_ERR(trans, "iwlwifi device config registers:\n"); -	for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++) -		if (pci_read_config_dword(pdev, i, ptr)) -			goto err_read; -#if defined(__linux__) -	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); -#elif defined(__FreeBSD__) -	iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i); -#endif - -	IWL_ERR(trans, "iwlwifi device memory mapped registers:\n"); -	for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++) -		*ptr = iwl_read32(trans, i); -#if defined(__linux__) -	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); -#elif defined(__FreeBSD__) -	iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i); -#endif - -	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); -	if (pos) { -		IWL_ERR(trans, "iwlwifi device AER capability structure:\n"); -		for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++) -			if (pci_read_config_dword(pdev, pos + i, ptr)) -				goto err_read; -#if defined(__linux__) -		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, -			       32, 4, buf, i, 0); -#elif defined(__FreeBSD__) -		iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i); -#endif -	} - -	/* Print parent device registers next */ -	if (!pdev->bus->self) -		goto out; - -	pdev = pdev->bus->self; -	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev)); - -	IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n", -		pci_name(pdev)); -	for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++) -		if (pci_read_config_dword(pdev, i, ptr)) -			goto err_read; -#if defined(__linux__) -	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); -#elif defined(__FreeBSD__) -	iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i); -#endif - -	/* Print root port AER registers */ -	pos = 0; -	pdev = pcie_find_root_port(pdev); -	if (pdev) -		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); -	if (pos) { -		IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n", -			pci_name(pdev)); -		sprintf(prefix, "iwlwifi %s: ", pci_name(pdev)); -		for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++) -			if (pci_read_config_dword(pdev, pos + i, ptr)) -				goto err_read; -#if 
defined(__linux__) -		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, -			       4, buf, i, 0); -#elif defined(__FreeBSD__) -		iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i); -#endif -	} -	goto out; - -err_read: -#if defined(__linux__) -	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); -#elif defined(__FreeBSD__) -	iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i); -#endif -	IWL_ERR(trans, "Read failed at 0x%X\n", i); -out: -	trans_pcie->pcie_dbg_dumped_once = 1; -	kfree(buf); -} -  int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership)  {  	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */ -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {  		iwl_set_bit(trans, CSR_GP_CNTRL,  			    CSR_GP_CNTRL_REG_FLAG_SW_RESET);  		usleep_range(10000, 20000); @@ -264,7 +149,7 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)  static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)  { -	if (trans->cfg->apmg_not_supported) +	if (trans->mac_cfg->base->apmg_not_supported)  		return;  	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) @@ -320,7 +205,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)  	 */  	/* Disable L0S exit timer (platform NMI Work/Around) */ -	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) +	if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000)  		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,  			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); @@ -339,12 +224,12 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)  	 * wake device's PCI Express link L1a -> L0s  	 */  	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, -		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); +		    CSR_HW_IF_CONFIG_REG_HAP_WAKE);  	iwl_pcie_apm_config(trans);  	/* Configure analog phase-lock-loop before activating to D0A */ -	if (trans->trans_cfg->base_params->pll_cfg) +	if (trans->mac_cfg->base->pll_cfg)  		iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);  	ret = iwl_finish_nic_init(trans); @@ -380,7 +265,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)  	 * bits do not disable clocks.  This preserves any hardware  	 * bits already set by default in "CLK_CTRL_REG" after reset.  	 */ -	if (!trans->cfg->apmg_not_supported) { +	if (!trans->mac_cfg->base->apmg_not_supported) {  		iwl_write_prph(trans, APMG_CLK_EN_REG,  			       APMG_CLK_VAL_DMA_CLK_RQT);  		udelay(20); @@ -414,8 +299,8 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)  	u32 dl_cfg_reg;  	/* Force XTAL ON */ -	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, -				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON); +	iwl_trans_set_bit(trans, CSR_GP_CNTRL, +			  CSR_GP_CNTRL_REG_FLAG_XTAL_ON);  	ret = iwl_trans_pcie_sw_reset(trans, true); @@ -424,8 +309,8 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)  	if (WARN_ON(ret)) {  		/* Release XTAL ON request */ -		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, -					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON); +		iwl_trans_clear_bit(trans, CSR_GP_CNTRL, +				    CSR_GP_CNTRL_REG_FLAG_XTAL_ON);  		return;  	} @@ -467,7 +352,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)  	 * SHRD_HW_RST is applied in S3.  	 
*/  	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, -		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE); +		    CSR_HW_IF_CONFIG_REG_PERSISTENCE);  	/*  	 * Clear "initialization complete" bit to move adapter from @@ -476,12 +361,12 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)  	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);  	/* Activates XTAL resources monitor */ -	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG, -				 CSR_MONITOR_XTAL_RESOURCES); +	iwl_trans_set_bit(trans, CSR_MONITOR_CFG_REG, +			  CSR_MONITOR_XTAL_RESOURCES);  	/* Release XTAL ON request */ -	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, -				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON); +	iwl_trans_clear_bit(trans, CSR_GP_CNTRL, +			    CSR_GP_CNTRL_REG_FLAG_XTAL_ON);  	udelay(10);  	/* Release APMG XTAL */ @@ -496,24 +381,22 @@ void iwl_pcie_apm_stop_master(struct iwl_trans *trans)  	/* stop device's busmaster DMA activity */ -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {  		iwl_set_bit(trans, CSR_GP_CNTRL,  			    CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ); -		ret = iwl_poll_bit(trans, CSR_GP_CNTRL, -				   CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, -				   CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, -				   100); +		ret = iwl_poll_bits(trans, CSR_GP_CNTRL, +				    CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, +				    100);  		usleep_range(10000, 20000);  	} else {  		iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); -		ret = iwl_poll_bit(trans, CSR_RESET, -				   CSR_RESET_REG_FLAG_MASTER_DISABLED, -				   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); +		ret = iwl_poll_bits(trans, CSR_RESET, +				    CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);  	} -	if (ret < 0) +	if (ret)  		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");  	IWL_DEBUG_INFO(trans, "stop master\n"); @@ -528,16 +411,16 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)  			iwl_pcie_apm_init(trans);  		/* inform ME that we are leaving */ -		if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) +		if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000)  			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,  					  APMG_PCIDEV_STT_VAL_WAKE_ME); -		else if (trans->trans_cfg->device_family >= +		else if (trans->mac_cfg->device_family >=  			 IWL_DEVICE_FAMILY_8000) {  			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,  				    CSR_RESET_LINK_PWR_MGMT_DISABLED);  			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, -				    CSR_HW_IF_CONFIG_REG_PREPARE | -				    CSR_HW_IF_CONFIG_REG_ENABLE_PME); +				    CSR_HW_IF_CONFIG_REG_WAKE_ME | +				    CSR_HW_IF_CONFIG_REG_WAKE_ME_PCIE_OWNER_EN);  			mdelay(1);  			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,  				      CSR_RESET_LINK_PWR_MGMT_DISABLED); @@ -592,7 +475,7 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)  		return -ENOMEM;  	} -	if (trans->trans_cfg->base_params->shadow_reg_enable) { +	if (trans->mac_cfg->base->shadow_reg_enable) {  		/* enable shadow regs in HW */  		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);  		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n"); @@ -609,18 +492,17 @@ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)  	int ret;  	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, -		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); +		    CSR_HW_IF_CONFIG_REG_PCI_OWN_SET);  	/* See if we got it */ -	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, -			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, -			  
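/*
 * Illustrative sketch, not part of the patch: the hunks just above and
 * below switch from iwl_poll_bit(), which returned the elapsed time
 * (>= 0) on success and a negative value on timeout, to iwl_poll_bits(),
 * which returns 0 on success and an errno on timeout. That is why the
 * success checks flip from "ret >= 0" to "!ret". Hedged model of the new
 * convention (hypothetical helper, fixed 10us poll step, -ETIMEDOUT as
 * in <errno.h>):
 */
static int model_poll_bits(unsigned int (*read_reg)(void),
			   unsigned int bits, int timeout_us)
{
	do {
		if ((read_reg() & bits) == bits)
			return 0;		/* all requested bits set */
		timeout_us -= 10;		/* poll interval */
	} while (timeout_us > 0);

	return -ETIMEDOUT;			/* errno-style failure */
}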
 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, -			   HW_READY_TIMEOUT); +	ret = iwl_poll_bits(trans, CSR_HW_IF_CONFIG_REG, +			    CSR_HW_IF_CONFIG_REG_PCI_OWN_SET, +			    HW_READY_TIMEOUT); -	if (ret >= 0) +	if (!ret)  		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE); -	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : ""); +	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret ? " not" : "");  	return ret;  } @@ -634,7 +516,7 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)  	ret = iwl_pcie_set_hw_ready(trans);  	/* If the card is ready, exit 0 */ -	if (ret >= 0) { +	if (!ret) {  		trans->csme_own = false;  		return 0;  	} @@ -648,11 +530,11 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)  		/* If HW is not ready, prepare the conditions to check again */  		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, -			    CSR_HW_IF_CONFIG_REG_PREPARE); +			    CSR_HW_IF_CONFIG_REG_WAKE_ME);  		do {  			ret = iwl_pcie_set_hw_ready(trans); -			if (ret >= 0) { +			if (!ret) {  				trans->csme_own = false;  				return 0;  			} @@ -661,7 +543,7 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)  				IWL_DEBUG_INFO(trans,  					       "Couldn't prepare the card but SAP is connected\n");  				trans->csme_own = true; -				if (trans->trans_cfg->device_family != +				if (trans->mac_cfg->device_family !=  				    IWL_DEVICE_FAMILY_9000)  					IWL_ERR(trans,  						"SAP not supported for this NIC family\n"); @@ -731,7 +613,7 @@ static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,  				 trans_pcie->ucode_write_complete, 5 * HZ);  	if (!ret) {  		IWL_ERR(trans, "Failed to load firmware chunk!\n"); -		iwl_trans_pcie_dump_regs(trans); +		iwl_trans_pcie_dump_regs(trans, trans_pcie->pci_dev);  		return -ETIMEDOUT;  	} @@ -846,7 +728,7 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,  	iwl_enable_interrupts(trans); -	if (trans->trans_cfg->gen2) { +	if (trans->mac_cfg->gen2) {  		if (cpu == 1)  			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,  				       0xFFFF); @@ -1004,7 +886,7 @@ monitor:  	if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {  		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),  			       fw_mon->physical >> dest->base_shift); -		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) +		if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)  			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),  				       (fw_mon->physical + fw_mon->size -  					256) >> dest->end_shift); @@ -1180,7 +1062,7 @@ static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)  	 */  	iwl_pcie_map_list(trans, causes_list_common,  			  ARRAY_SIZE(causes_list_common), val); -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)  		iwl_pcie_map_list(trans, causes_list_bz,  				  ARRAY_SIZE(causes_list_bz), val);  	else @@ -1202,7 +1084,7 @@ static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)  	 * the other (N - 2) interrupt vectors.  	 
*/  	val = BIT(MSIX_FH_INT_CAUSES_Q(0)); -	for (idx = 1; idx < trans->num_rx_queues; idx++) { +	for (idx = 1; idx < trans->info.num_rxqs; idx++) {  		iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),  			   MSIX_FH_INT_CAUSES_Q(idx - offset));  		val |= BIT(MSIX_FH_INT_CAUSES_Q(idx)); @@ -1223,7 +1105,7 @@ void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)  	struct iwl_trans *trans = trans_pcie->trans;  	if (!trans_pcie->msix_enabled) { -		if (trans->trans_cfg->mq_rx_supported && +		if (trans->mac_cfg->mq_rx_supported &&  		    test_bit(STATUS_DEVICE_ENABLED, &trans->status))  			iwl_write_umac_prph(trans, UREG_CHICK,  					    UREG_CHICK_MSI_ENABLE); @@ -1298,7 +1180,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq)  		iwl_pcie_rx_stop(trans);  		/* Power-down device's busmaster DMA clocks */ -		if (!trans->cfg->apmg_not_supported) { +		if (!trans->mac_cfg->base->apmg_not_supported) {  			iwl_write_prph(trans, APMG_CLK_DIS_REG,  				       APMG_CLK_VAL_DMA_CLK_RQT);  			udelay(5); @@ -1306,7 +1188,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq)  	}  	/* Make sure (redundant) we've released our request to stay awake */ -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)  		iwl_clear_bit(trans, CSR_GP_CNTRL,  			      CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);  	else @@ -1364,7 +1246,9 @@ void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)  }  int iwl_trans_pcie_start_fw(struct iwl_trans *trans, -			    const struct fw_img *fw, bool run_in_rfkill) +			    const struct iwl_fw *fw, +			    const struct fw_img *img, +			    bool run_in_rfkill)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	bool hw_rfkill; @@ -1435,10 +1319,10 @@ int iwl_trans_pcie_start_fw(struct iwl_trans *trans,  	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);  	/* Load the given image to the HW */ -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) -		ret = iwl_pcie_load_given_ucode_8000(trans, fw); +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000) +		ret = iwl_pcie_load_given_ucode_8000(trans, img);  	else -		ret = iwl_pcie_load_given_ucode(trans, fw); +		ret = iwl_pcie_load_given_ucode(trans, img);  	/* re-check RF-Kill state since we may have missed the interrupt */  	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); @@ -1450,10 +1334,10 @@ out:  	return ret;  } -void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr) +void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)  {  	iwl_pcie_reset_ict(trans); -	iwl_pcie_tx_start(trans, scd_addr); +	iwl_pcie_tx_start(trans);  }  void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans, @@ -1512,12 +1396,12 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)  	IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",  		 state ? 
"disabled" : "enabled");  	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state) && -	    !WARN_ON(trans->trans_cfg->gen2)) +	    !WARN_ON(trans->mac_cfg->gen2))  		_iwl_trans_pcie_stop_device(trans, from_irq);  } -void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans, -				  bool test, bool reset) +static void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans, +					 bool test, bool reset)  {  	iwl_disable_interrupts(trans); @@ -1532,7 +1416,7 @@ void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,  	iwl_pcie_synchronize_irqs(trans); -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {  		iwl_clear_bit(trans, CSR_GP_CNTRL,  			      CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);  		iwl_clear_bit(trans, CSR_GP_CNTRL, @@ -1561,30 +1445,41 @@ static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	int ret; -	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) +	if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) +		return 0; + +	trans_pcie->sx_state = IWL_SX_WAITING; + +	if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210)  		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,  				    suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND :  					      UREG_DOORBELL_TO_ISR6_RESUME); -	else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) +	else  		iwl_write32(trans, CSR_IPC_SLEEP_CONTROL,  			    suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND :  				      CSR_IPC_SLEEP_CONTROL_RESUME); -	else -		return 0;  	ret = wait_event_timeout(trans_pcie->sx_waitq, -				 trans_pcie->sx_complete, 2 * HZ); - -	/* Invalidate it toward next suspend or resume */ -	trans_pcie->sx_complete = false; - +				 trans_pcie->sx_state != IWL_SX_WAITING, +				 2 * HZ);  	if (!ret) {  		IWL_ERR(trans, "Timeout %s D3\n",  			suspend ? "entering" : "exiting"); -		return -ETIMEDOUT; +		ret = -ETIMEDOUT; +	} else { +		ret = 0;  	} -	return 0; +	if (trans_pcie->sx_state == IWL_SX_ERROR) { +		IWL_ERR(trans, "FW error while %s D3\n", +			suspend ? 
"entering" : "exiting"); +		ret = -EIO; +	} + +	/* Invalidate it toward next suspend or resume */ +	trans_pcie->sx_state = IWL_SX_INVALID; + +	return ret;  }  int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset) @@ -1594,7 +1489,7 @@ int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset)  	if (!reset)  		/* Enable persistence mode to avoid reset */  		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, -			    CSR_HW_IF_CONFIG_REG_PERSIST_MODE); +			    CSR_HW_IF_CONFIG_REG_PERSISTENCE);  	ret = iwl_pcie_d3_handshake(trans, true);  	if (ret) @@ -1620,7 +1515,7 @@ int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,  		goto out;  	} -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)  		iwl_set_bit(trans, CSR_GP_CNTRL,  			    CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);  	else @@ -1671,6 +1566,8 @@ int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,  out:  	if (*status == IWL_D3_STATUS_ALIVE)  		ret = iwl_pcie_d3_handshake(trans, false); +	else +		trans->state = IWL_TRANS_NO_FW;  	return ret;  } @@ -1678,17 +1575,18 @@ out:  static void  iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,  			    struct iwl_trans *trans, -			    const struct iwl_cfg_trans_params *cfg_trans) +			    const struct iwl_mac_cfg *mac_cfg, +			    struct iwl_trans_info *info)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	int max_irqs, num_irqs, i, ret;  	u16 pci_cmd;  	u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES; -	if (!cfg_trans->mq_rx_supported) +	if (!mac_cfg->mq_rx_supported)  		goto enable_msi; -	if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000) +	if (mac_cfg->device_family <= IWL_DEVICE_FAMILY_9000)  		max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES;  	max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues); @@ -1718,27 +1616,28 @@ iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,  	 * More than two interrupts: we will use fewer RSS queues.  	 */  	if (num_irqs <= max_irqs - 2) { -		trans_pcie->trans->num_rx_queues = num_irqs + 1; +		info->num_rxqs = num_irqs + 1;  		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |  			IWL_SHARED_IRQ_FIRST_RSS;  	} else if (num_irqs == max_irqs - 1) { -		trans_pcie->trans->num_rx_queues = num_irqs; +		info->num_rxqs = num_irqs;  		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;  	} else { -		trans_pcie->trans->num_rx_queues = num_irqs - 1; +		info->num_rxqs = num_irqs - 1;  	}  	IWL_DEBUG_INFO(trans,  		       "MSI-X enabled with rx queues %d, vec mask 0x%x\n", -		       trans_pcie->trans->num_rx_queues, trans_pcie->shared_vec_mask); +		       info->num_rxqs, trans_pcie->shared_vec_mask); -	WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES); +	WARN_ON(info->num_rxqs > IWL_MAX_RX_HW_QUEUES);  	trans_pcie->alloc_vecs = num_irqs;  	trans_pcie->msix_enabled = true;  	return;  enable_msi: +	info->num_rxqs = 1;  	ret = pci_enable_msi(pdev);  	if (ret) {  		dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret); @@ -1751,14 +1650,15 @@ enable_msi:  	}  } -static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans) +static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans, +				      struct iwl_trans_info *info)  {  #if defined(CONFIG_SMP)  	int iter_rx_q, i, ret, cpu, offset;  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 
0 : 1; -	iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i; +	iter_rx_q = info->num_rxqs - 1 + i;  	offset = 1 + i;  	for (; i < iter_rx_q ; i++) {  		/* @@ -1778,7 +1678,8 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)  }  static int iwl_pcie_init_msix_handler(struct pci_dev *pdev, -				      struct iwl_trans_pcie *trans_pcie) +				      struct iwl_trans_pcie *trans_pcie, +				      struct iwl_trans_info *info)  {  	int i; @@ -1807,7 +1708,7 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,  			return ret;  		}  	} -	iwl_pcie_irq_set_affinity(trans_pcie->trans); +	iwl_pcie_irq_set_affinity(trans_pcie->trans, info);  	return 0;  } @@ -1816,7 +1717,7 @@ static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)  {  	u32 hpm, wprot; -	switch (trans->trans_cfg->device_family) { +	switch (trans->mac_cfg->device_family) {  	case IWL_DEVICE_FAMILY_9000:  		wprot = PREG_PRPH_WPROT_9000;  		break; @@ -1864,7 +1765,7 @@ static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)  	return iwl_trans_pcie_sw_reset(trans, true);  } -static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans) +int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	int err; @@ -1885,8 +1786,8 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)  	if (err)  		return err; -	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 && -	    trans->trans_cfg->integrated) { +	if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000 && +	    trans->mac_cfg->integrated) {  		err = iwl_pcie_gen2_force_power_gating(trans);  		if (err)  			return err; @@ -1962,15 +1863,11 @@ u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)  #elif defined(__FreeBSD__)  void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)  { - -	IWL_DEBUG_PCI_RW(trans, "W1 %#010x %#04x\n", ofs, val);  	bus_write_1((struct resource *)IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base, ofs, val);  }  void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)  { - -	IWL_DEBUG_PCI_RW(trans, "W4 %#010x %#010x\n", ofs, val);  	bus_write_4((struct resource *)IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base, ofs, val);  } @@ -1979,14 +1876,13 @@ u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)  	u32 v;  	v = bus_read_4((struct resource *)IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base, ofs); -	IWL_DEBUG_PCI_RW(trans, "R4 %#010x %#010x\n", ofs, v);  	return (v);  }  #endif  static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)  { -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)  		return 0x00FFFFFF;  	else  		return 0x000FFFFF; @@ -2010,46 +1906,17 @@ void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val)  	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);  } -void iwl_trans_pcie_configure(struct iwl_trans *trans, -			      const struct iwl_trans_config *trans_cfg) +void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	/* free all first - we might be reconfigured for a different size */  	iwl_pcie_free_rbs_pool(trans); -	trans_pcie->txqs.cmd.q_id = trans_cfg->cmd_queue; -	trans_pcie->txqs.cmd.fifo = trans_cfg->cmd_fifo; -	trans_pcie->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout; -	trans_pcie->txqs.page_offs = trans_cfg->cb_data_offs; -	trans_pcie->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + 
sizeof(void *); -	trans_pcie->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver; - -	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) -		trans_pcie->n_no_reclaim_cmds = 0; -	else -		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds; -	if (trans_pcie->n_no_reclaim_cmds) -		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds, -		       trans_pcie->n_no_reclaim_cmds * sizeof(u8)); - -	trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;  	trans_pcie->rx_page_order = -		iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size); +		iwl_trans_get_rb_size_order(trans->conf.rx_buf_size);  	trans_pcie->rx_buf_bytes = -		iwl_trans_get_rb_size(trans_pcie->rx_buf_size); -	trans_pcie->supported_dma_mask = DMA_BIT_MASK(12); -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) -		trans_pcie->supported_dma_mask = DMA_BIT_MASK(11); - -	trans_pcie->txqs.bc_table_dword = trans_cfg->bc_table_dword; -	trans_pcie->scd_set_active = trans_cfg->scd_set_active; - -	trans->command_groups = trans_cfg->command_groups; -	trans->command_groups_size = trans_cfg->command_groups_size; - - -	trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake; +		iwl_trans_get_rb_size(trans->conf.rx_buf_size);  }  void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions, @@ -2077,11 +1944,14 @@ void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions  static void iwl_pcie_free_invalid_tx_cmd(struct iwl_trans *trans)  { -	iwl_pcie_free_dma_ptr(trans, &trans->invalid_tx_cmd); +	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + +	iwl_pcie_free_dma_ptr(trans, &trans_pcie->invalid_tx_cmd);  }  static int iwl_pcie_alloc_invalid_tx_cmd(struct iwl_trans *trans)  { +	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	struct iwl_cmd_header_wide bad_cmd = {  		.cmd = INVALID_WR_PTR_CMD,  		.group_id = DEBUG_GROUP, @@ -2091,11 +1961,11 @@ static int iwl_pcie_alloc_invalid_tx_cmd(struct iwl_trans *trans)  	};  	int ret; -	ret = iwl_pcie_alloc_dma_ptr(trans, &trans->invalid_tx_cmd, +	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->invalid_tx_cmd,  				     sizeof(bad_cmd));  	if (ret)  		return ret; -	memcpy(trans->invalid_tx_cmd.addr, &bad_cmd, sizeof(bad_cmd)); +	memcpy(trans_pcie->invalid_tx_cmd.addr, &bad_cmd, sizeof(bad_cmd));  	return 0;  } @@ -2106,7 +1976,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)  	iwl_pcie_synchronize_irqs(trans); -	if (trans->trans_cfg->gen2) +	if (trans->mac_cfg->gen2)  		iwl_txq_gen2_tx_free(trans);  	else  		iwl_pcie_tx_free(trans); @@ -2159,10 +2029,157 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)  	iwl_trans_free(trans);  } +static union acpi_object * +iwl_trans_pcie_call_prod_reset_dsm(struct pci_dev *pdev, u16 cmd, u16 value) +{ +#ifdef CONFIG_ACPI +	struct iwl_dsm_internal_product_reset_cmd pldr_arg = { +		.cmd = cmd, +		.value = value, +	}; +	union acpi_object arg = { +		.buffer.type = ACPI_TYPE_BUFFER, +		.buffer.length = sizeof(pldr_arg), +		.buffer.pointer = (void *)&pldr_arg, +	}; +	static const guid_t dsm_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29, +						 0x81, 0x4F, 0x75, 0xE4, +						 0xDD, 0x26, 0xB5, 0xFD); + +	if (!acpi_check_dsm(ACPI_HANDLE(&pdev->dev), &dsm_guid, ACPI_DSM_REV, +			    DSM_INTERNAL_FUNC_PRODUCT_RESET)) +		return ERR_PTR(-ENODEV); + +	return iwl_acpi_get_dsm_object(&pdev->dev, ACPI_DSM_REV, +				       DSM_INTERNAL_FUNC_PRODUCT_RESET, +				       &arg, &dsm_guid); +#else +	return ERR_PTR(-EOPNOTSUPP); +#endif +} + +void 
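/*
 * Illustrative sketch, not part of the patch: the helper above wraps the
 * ACPI _DSM (GUID 7266172C-220B-4B29-814F-75E4DD26B5FD) controlling
 * "product reset": GET_MODE queries the armed mode, SET_MODE arms or
 * disarms it, GET_STATUS reads the outcome afterwards. Hedged model of
 * how the removal path below strings them together; the model_* names
 * are hypothetical stand-ins for the static helpers in this file:
 */
static void model_dsm_set_mode(struct pci_dev *pdev, bool enable,
			       bool integrated);
static void model_platform_reset(struct pci_dev *pdev);

static void model_product_reset(struct pci_dev *pdev, bool integrated)
{
	/* arm product reset; discrete parts additionally need the
	 * WiFi-FLR and BT off/on bits set in the mode word */
	model_dsm_set_mode(pdev, true, integrated);

	/* then reset: prefer the BIOS _PRR/_RST object, falling back
	 * to pci_reset_function() when the platform has no _PRR */
	model_platform_reset(pdev);
}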
iwl_trans_pcie_check_product_reset_mode(struct pci_dev *pdev) +{ +	union acpi_object *res; + +	res = iwl_trans_pcie_call_prod_reset_dsm(pdev, +						 DSM_INTERNAL_PLDR_CMD_GET_MODE, +						 0); +	if (IS_ERR(res)) +		return; + +	if (res->type != ACPI_TYPE_INTEGER) +		IWL_ERR_DEV(&pdev->dev, +			    "unexpected return type from product reset DSM\n"); +	else +		IWL_DEBUG_DEV_POWER(&pdev->dev, +				    "product reset mode is 0x%llx\n", +				    res->integer.value); + +	ACPI_FREE(res); +} + +static void iwl_trans_pcie_set_product_reset(struct pci_dev *pdev, bool enable, +					     bool integrated) +{ +	union acpi_object *res; +	u16 mode = enable ? DSM_INTERNAL_PLDR_MODE_EN_PROD_RESET : 0; + +	if (!integrated) +		mode |= DSM_INTERNAL_PLDR_MODE_EN_WIFI_FLR | +			DSM_INTERNAL_PLDR_MODE_EN_BT_OFF_ON; + +	res = iwl_trans_pcie_call_prod_reset_dsm(pdev, +						 DSM_INTERNAL_PLDR_CMD_SET_MODE, +						 mode); +	if (IS_ERR(res)) { +		if (enable) +			IWL_ERR_DEV(&pdev->dev, +				    "ACPI _DSM not available (%d), cannot do product reset\n", +				    (int)PTR_ERR(res)); +		return; +	} + +	ACPI_FREE(res); +	IWL_DEBUG_DEV_POWER(&pdev->dev, "%sabled product reset via DSM\n", +			    enable ? "En" : "Dis"); +	iwl_trans_pcie_check_product_reset_mode(pdev); +} + +void iwl_trans_pcie_check_product_reset_status(struct pci_dev *pdev) +{ +	union acpi_object *res; + +	res = iwl_trans_pcie_call_prod_reset_dsm(pdev, +						 DSM_INTERNAL_PLDR_CMD_GET_STATUS, +						 0); +	if (IS_ERR(res)) +		return; + +	if (res->type != ACPI_TYPE_INTEGER) +		IWL_ERR_DEV(&pdev->dev, +			    "unexpected return type from product reset DSM\n"); +	else +		IWL_DEBUG_DEV_POWER(&pdev->dev, +				    "product reset status is 0x%llx\n", +				    res->integer.value); + +	ACPI_FREE(res); +} + +static void iwl_trans_pcie_call_reset(struct pci_dev *pdev) +{ +#ifdef CONFIG_ACPI +	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; +	union acpi_object *p, *ref; +	acpi_status status; +	int ret = -EINVAL; + +	status = acpi_evaluate_object(ACPI_HANDLE(&pdev->dev), +				      "_PRR", NULL, &buffer); +	if (ACPI_FAILURE(status)) { +		IWL_DEBUG_DEV_POWER(&pdev->dev, "No _PRR method found\n"); +		goto out; +	} +	p = buffer.pointer; + +	if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 1) { +		pci_err(pdev, "Bad _PRR return type\n"); +		goto out; +	} + +	ref = &p->package.elements[0]; +	if (ref->type != ACPI_TYPE_LOCAL_REFERENCE) { +		pci_err(pdev, "_PRR wasn't a reference\n"); +		goto out; +	} + +	status = acpi_evaluate_object(ref->reference.handle, +				      "_RST", NULL, NULL); +	if (ACPI_FAILURE(status)) { +		pci_err(pdev, +			"Failed to call _RST on object returned by _PRR (%d)\n", +			status); +		goto out; +	} +	ret = 0; +out: +	kfree(buffer.pointer); +	if (!ret) { +		IWL_DEBUG_DEV_POWER(&pdev->dev, "called _RST on _PRR object\n"); +		return; +	} +	IWL_DEBUG_DEV_POWER(&pdev->dev, +			    "No BIOS support, using pci_reset_function()\n"); +#endif +	pci_reset_function(pdev); +} +  struct iwl_trans_pcie_removal {  	struct pci_dev *pdev;  	struct work_struct work; -	bool rescan; +	enum iwl_reset_mode mode; +	bool integrated;  };  static void iwl_trans_pcie_removal_wk(struct work_struct *wk) @@ -2180,17 +2197,71 @@ static void iwl_trans_pcie_removal_wk(struct work_struct *wk)  	if (!bus)  		goto out; -	dev_err(&pdev->dev, "Device gone - attempting removal\n"); -  	kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop); +	if (removal->mode == IWL_RESET_MODE_PROD_RESET) { +		struct pci_dev *bt = NULL; + +		if (!removal->integrated) { +			/* discrete 
devices have WiFi/BT at function 0/1 */ +			int slot = PCI_SLOT(pdev->devfn); +			int func = PCI_FUNC(pdev->devfn); + +			if (func == 0) +				bt = pci_get_slot(bus, PCI_DEVFN(slot, 1)); +			else +				pci_info(pdev, "Unexpected function %d\n", +					 func); +		} else { +			/* on integrated we have to look up by ID (same bus) */ +			static const struct pci_device_id bt_device_ids[] = { +#define BT_DEV(_id) { PCI_DEVICE(PCI_VENDOR_ID_INTEL, _id) } +				BT_DEV(0xA876), /* LNL */ +				BT_DEV(0xE476), /* PTL-P */ +				BT_DEV(0xE376), /* PTL-H */ +				BT_DEV(0xD346), /* NVL-H */ +				BT_DEV(0x6E74), /* NVL-S */ +				BT_DEV(0x4D76), /* WCL */ +				BT_DEV(0xD246), /* RZL-H */ +				BT_DEV(0x6C46), /* RZL-M */ +				{} +			}; +			struct pci_dev *tmp = NULL; + +			for_each_pci_dev(tmp) { +				if (tmp->bus != bus) +					continue; + +				if (pci_match_id(bt_device_ids, tmp)) { +					bt = tmp; +					break; +				} +			} +		} + +		if (bt) { +			pci_info(bt, "Removal by WiFi due to product reset\n"); +			pci_stop_and_remove_bus_device(bt); +			pci_dev_put(bt); +		} +	} + +	iwl_trans_pcie_set_product_reset(pdev, +					 removal->mode == +						IWL_RESET_MODE_PROD_RESET, +					 removal->integrated); +	if (removal->mode >= IWL_RESET_MODE_FUNC_RESET) +		iwl_trans_pcie_call_reset(pdev); +  	pci_stop_and_remove_bus_device(pdev);  	pci_dev_put(pdev); -	if (removal->rescan) { +	if (removal->mode >= IWL_RESET_MODE_RESCAN) {  #if defined(__linux__)  		if (bus->parent)  			bus = bus->parent; +#elif defined(__FreeBSD__) +		/* XXX-TODO */  #endif  		pci_rescan_bus(bus);  	} @@ -2202,14 +2273,29 @@ out:  	module_put(THIS_MODULE);  } -void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan) +void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode)  { +	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	struct iwl_trans_pcie_removal *removal; +	char _msg = 0, *msg = &_msg; + +	if (WARN_ON(mode < IWL_RESET_MODE_REMOVE_ONLY || +		    mode == IWL_RESET_MODE_BACKOFF)) +		return;  	if (test_bit(STATUS_TRANS_DEAD, &trans->status))  		return; -	IWL_ERR(trans, "Device gone - scheduling removal!\n"); +	if (trans_pcie->me_present && mode == IWL_RESET_MODE_PROD_RESET) { +		mode = IWL_RESET_MODE_FUNC_RESET; +		if (trans_pcie->me_present < 0) +			msg = " instead of product reset as ME may be present"; +		else +			msg = " instead of product reset as ME is present"; +	} + +	IWL_INFO(trans, "scheduling reset (mode=%d%s)\n", mode, msg); +  	iwl_pcie_dump_csr(trans);  	/* @@ -2236,18 +2322,19 @@ void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan)  	set_bit(STATUS_TRANS_DEAD, &trans->status);  	removal->pdev = to_pci_dev(trans->dev); -	removal->rescan = rescan; +	removal->mode = mode; +	removal->integrated = trans->mac_cfg->integrated;  	INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);  	pci_dev_get(removal->pdev);  	schedule_work(&removal->work);  } -EXPORT_SYMBOL(iwl_trans_pcie_remove); +EXPORT_SYMBOL(iwl_trans_pcie_reset);  /*   * This version doesn't disable BHs but rather assumes they're   * already disabled.   
*/ -bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) +bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)  {  	int ret;  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -2264,15 +2351,15 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)  	if (trans_pcie->cmd_hold_nic_awake)  		goto out; -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {  		write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ;  		mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;  		poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;  	}  	/* this bit wakes up the NIC */ -	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, write); -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) +	iwl_trans_set_bit(trans, CSR_GP_CNTRL, write); +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)  		udelay(2);  	/* @@ -2295,18 +2382,24 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)  	 * 5000 series and later (including 1000 series) have non-volatile SRAM,  	 * and do not save/restore SRAM when power cycling.  	 */ -	ret = iwl_poll_bit(trans, CSR_GP_CNTRL, poll, mask, 15000); -	if (unlikely(ret < 0)) { +	ret = iwl_poll_bits_mask(trans, CSR_GP_CNTRL, poll, mask, 15000); +	if (unlikely(ret)) {  		u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL); +		if (silent) { +			spin_unlock(&trans_pcie->reg_lock); +			return false; +		} +  		WARN_ONCE(1,  			  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",  			  cntrl); -		iwl_trans_pcie_dump_regs(trans); +		iwl_trans_pcie_dump_regs(trans, trans_pcie->pci_dev);  		if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) -			iwl_trans_pcie_remove(trans, false); +			iwl_trans_pcie_reset(trans, +					     IWL_RESET_MODE_REMOVE_ONLY);  		else  			iwl_write32(trans, CSR_RESET,  				    CSR_RESET_REG_FLAG_FORCE_NMI); @@ -2329,7 +2422,7 @@ bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)  	bool ret;  	local_bh_disable(); -	ret = __iwl_trans_pcie_grab_nic_access(trans); +	ret = __iwl_trans_pcie_grab_nic_access(trans, false);  	if (ret) {  		/* keep BHs disabled until iwl_trans_pcie_release_nic_access */  		return ret; @@ -2338,7 +2431,8 @@ bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)  	return false;  } -void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans) +void __releases(nic_access_nobh) +iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -2352,12 +2446,12 @@ void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)  	if (trans_pcie->cmd_hold_nic_awake)  		goto out; -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) -		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, -					   CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) +		iwl_trans_clear_bit(trans, CSR_GP_CNTRL, +				    CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);  	else -		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, -					   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); +		iwl_trans_clear_bit(trans, CSR_GP_CNTRL, +				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);  	/*  	 * Above we read the CSR_GP_CNTRL register, which will flush  	 * any previous writes, but we need the write that clears the @@ -2365,6 +2459,7 @@ void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)  	 * scheduled on different CPUs (after we drop reg_lock).  	 
*/  out: +	__release(nic_access_nobh);  	spin_unlock_bh(&trans_pcie->reg_lock);  } @@ -2418,24 +2513,6 @@ int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,  	return 0;  } -int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, -			     const void *buf, int dwords) -{ -	int offs, ret = 0; -	const u32 *vals = buf; - -	if (iwl_trans_grab_nic_access(trans)) { -		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); -		for (offs = 0; offs < dwords; offs++) -			iwl_write32(trans, HBUS_TARG_MEM_WDAT, -				    vals ? vals[offs] : 0); -		iwl_trans_release_nic_access(trans); -	} else { -		ret = -EBUSY; -	} -	return ret; -} -  int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,  				 u32 *val)  { @@ -2450,7 +2527,7 @@ int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -	if (queue >= trans->num_rx_queues || !trans_pcie->rxq) +	if (queue >= trans->info.num_rxqs || !trans_pcie->rxq)  		return -EINVAL;  	data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma; @@ -2531,10 +2608,10 @@ int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)  	/* waiting for all the tx frames complete might take a while */  	for (cnt = 0; -	     cnt < trans->trans_cfg->base_params->num_of_queues; +	     cnt < trans->mac_cfg->base->num_of_queues;  	     cnt++) { -		if (cnt == trans_pcie->txqs.cmd.q_id) +		if (cnt == trans->conf.cmd_queue)  			continue;  		if (!test_bit(cnt, trans_pcie->txqs.queue_used))  			continue; @@ -2555,7 +2632,7 @@ void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	spin_lock_bh(&trans_pcie->reg_lock); -	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value); +	_iwl_trans_set_bits_mask(trans, reg, mask, value);  	spin_unlock_bh(&trans_pcie->reg_lock);  } @@ -2675,7 +2752,7 @@ static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos)  	struct iwl_dbgfs_tx_queue_priv *priv = seq->private;  	struct iwl_dbgfs_tx_queue_state *state; -	if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues) +	if (*pos >= priv->trans->mac_cfg->base->num_of_queues)  		return NULL;  	state = kmalloc(sizeof(*state), GFP_KERNEL); @@ -2693,7 +2770,7 @@ static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq,  	*pos = ++state->pos; -	if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues) +	if (*pos >= priv->trans->mac_cfg->base->num_of_queues)  		return NULL;  	return state; @@ -2725,7 +2802,7 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)  	else  		seq_puts(seq, "(unallocated)"); -	if (state->pos == trans_pcie->txqs.cmd.q_id) +	if (state->pos == trans->conf.cmd_queue)  		seq_puts(seq, " (HCMD)");  	seq_puts(seq, "\n"); @@ -2763,7 +2840,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,  	int pos = 0, i, ret;  	size_t bufsz; -	bufsz = sizeof(char) * 121 * trans->num_rx_queues; +	bufsz = sizeof(char) * 121 * trans->info.num_rxqs;  	if (!trans_pcie->rxq)  		return -EAGAIN; @@ -2772,9 +2849,11 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,  	if (!buf)  		return -ENOMEM; -	for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) { +	for (i = 0; i < trans->info.num_rxqs && pos < bufsz; i++) {  		struct iwl_rxq *rxq = &trans_pcie->rxq[i]; +		spin_lock_bh(&rxq->lock); +  		pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",  				 i);  		pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n", @@ -2795,6 +2874,7 @@ static 
ssize_t iwl_dbgfs_rx_queue_read(struct file *file,  			pos += scnprintf(buf + pos, bufsz - pos,  					 "\tclosed_rb_num: Not Allocated\n");  		} +		spin_unlock_bh(&rxq->lock);  	}  	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);  	kfree(buf); @@ -3093,12 +3173,58 @@ static ssize_t iwl_dbgfs_rf_read(struct file *file,  				       strlen(trans_pcie->rf_name));  } +static ssize_t iwl_dbgfs_reset_write(struct file *file, +				     const char __user *user_buf, +				     size_t count, loff_t *ppos) +{ +	struct iwl_trans *trans = file->private_data; +	static const char * const modes[] = { +		[IWL_RESET_MODE_SW_RESET] = "sw", +		[IWL_RESET_MODE_REPROBE] = "reprobe", +		[IWL_RESET_MODE_TOP_RESET] = "top", +		[IWL_RESET_MODE_REMOVE_ONLY] = "remove", +		[IWL_RESET_MODE_RESCAN] = "rescan", +		[IWL_RESET_MODE_FUNC_RESET] = "function", +		[IWL_RESET_MODE_PROD_RESET] = "product", +	}; +	char buf[10] = {}; +	int mode; + +	if (count > sizeof(buf) - 1) +		return -EINVAL; + +	if (copy_from_user(buf, user_buf, count)) +		return -EFAULT; + +	mode = sysfs_match_string(modes, buf); +	if (mode < 0) +		return mode; + +	if (mode < IWL_RESET_MODE_REMOVE_ONLY) { +		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) +			return -EINVAL; +		if (mode == IWL_RESET_MODE_TOP_RESET) { +			if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC) +				return -EINVAL; +			trans->request_top_reset = 1; +		} +		iwl_op_mode_nic_error(trans->op_mode, IWL_ERR_TYPE_DEBUGFS); +		iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_DEBUGFS); +		return count; +	} + +	iwl_trans_pcie_reset(trans, mode); + +	return count; +} +  DEBUGFS_READ_WRITE_FILE_OPS(interrupt);  DEBUGFS_READ_FILE_OPS(fh_reg);  DEBUGFS_READ_FILE_OPS(rx_queue);  DEBUGFS_WRITE_FILE_OPS(csr);  DEBUGFS_READ_WRITE_FILE_OPS(rfkill);  DEBUGFS_READ_FILE_OPS(rf); +DEBUGFS_WRITE_FILE_OPS(reset);  static const struct file_operations iwl_dbgfs_tx_queue_ops = {  	.owner = THIS_MODULE, @@ -3127,6 +3253,7 @@ void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)  	DEBUGFS_ADD_FILE(rfkill, dir, 0600);  	DEBUGFS_ADD_FILE(monitor_data, dir, 0400);  	DEBUGFS_ADD_FILE(rf, dir, 0400); +	DEBUGFS_ADD_FILE(reset, dir, 0200);  }  void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans) @@ -3225,7 +3352,7 @@ static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,  	(*data)->len = cpu_to_le32(fh_regs_len);  	val = (void *)(*data)->data; -	if (!trans->trans_cfg->gen2) +	if (!trans->mac_cfg->gen2)  		for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;  		     i += sizeof(u32))  			*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); @@ -3272,7 +3399,7 @@ iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,  {  	u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt; -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {  		base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;  		base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;  		write_ptr = DBGC_CUR_DBGBUF_STATUS; @@ -3292,7 +3419,7 @@ iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,  		cpu_to_le32(iwl_read_prph(trans, wrap_cnt));  	fw_mon_data->fw_mon_base_ptr =  		cpu_to_le32(iwl_read_prph(trans, base)); -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {  		fw_mon_data->fw_mon_base_high_ptr =  			cpu_to_le32(iwl_read_prph(trans, base_high));  		write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK; @@ -3312,8 +3439,8 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans 
*trans,  	if (trans->dbg.dest_tlv ||  	    (fw_mon->size && -	     (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 || -	      trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) { +	     (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000 || +	      trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {  		struct iwl_fw_error_dump_fw_mon *fw_mon_data;  		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); @@ -3336,14 +3463,14 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,  					IWL_LDBG_M2S_BUF_BA_MSK) <<  				       trans->dbg.dest_tlv->base_shift;  				base *= IWL_M2S_UNIT_SIZE; -				base += trans->cfg->smem_offset; +				base += trans->mac_cfg->base->smem_offset;  			} else {  				base = iwl_read_prph(trans, base) <<  				       trans->dbg.dest_tlv->base_shift;  			} -			iwl_trans_read_mem(trans, base, fw_mon_data->data, -					   monitor_len / sizeof(u32)); +			iwl_trans_pcie_read_mem(trans, base, fw_mon_data->data, +						monitor_len / sizeof(u32));  		} else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) {  			monitor_len =  				iwl_trans_pci_dump_marbh_monitor(trans, @@ -3377,7 +3504,7 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)  			base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<  				trans->dbg.dest_tlv->base_shift;  			base *= IWL_M2S_UNIT_SIZE; -			base += trans->cfg->smem_offset; +			base += trans->mac_cfg->base->smem_offset;  			monitor_len =  				(cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >> @@ -3393,7 +3520,7 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)  			      trans->dbg.dest_tlv->end_shift;  			/* Make "end" point to the actual end */ -			if (trans->trans_cfg->device_family >= +			if (trans->mac_cfg->device_family >=  			    IWL_DEVICE_FAMILY_8000 ||  			    trans->dbg.dest_tlv->monitor_mode == MARBH_MODE)  				end += (1 << trans->dbg.dest_tlv->end_shift); @@ -3414,13 +3541,13 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	struct iwl_fw_error_dump_data *data; -	struct iwl_txq *cmdq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; +	struct iwl_txq *cmdq = trans_pcie->txqs.txq[trans->conf.cmd_queue];  	struct iwl_fw_error_dump_txcmd *txcmd;  	struct iwl_trans_dump_data *dump_data;  	u32 len, num_rbs = 0, monitor_len = 0;  	int i, ptr;  	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) && -			!trans->trans_cfg->mq_rx_supported && +			!trans->mac_cfg->mq_rx_supported &&  			dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);  	if (!dump_mask) @@ -3445,7 +3572,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,  	/* FH registers */  	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) { -		if (trans->trans_cfg->gen2) +		if (trans->mac_cfg->gen2)  			len += sizeof(*data) +  			       (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) -  				iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2)); @@ -3459,15 +3586,18 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,  		/* Dump RBs is supported only for pre-9000 devices (1 queue) */  		struct iwl_rxq *rxq = &trans_pcie->rxq[0];  		/* RBs */ +		spin_lock_bh(&rxq->lock);  		num_rbs = iwl_get_closed_rb_stts(trans, rxq);  		num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; +		spin_unlock_bh(&rxq->lock); +  		len += num_rbs * (sizeof(*data) +  				  sizeof(struct iwl_fw_error_dump_rb) +  				  (PAGE_SIZE << trans_pcie->rx_page_order));  	}  	/* Paged memory for gen2 HW */ -	if (trans->trans_cfg->gen2 && 
dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) +	if (trans->mac_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))  		for (i = 0; i < trans->init_dram.paging_cnt; i++)  			len += sizeof(*data) +  			       sizeof(struct iwl_fw_error_dump_paging) + @@ -3492,7 +3622,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,  			u8 tfdidx;  			u32 caplen, cmdlen; -			if (trans->trans_cfg->gen2) +			if (trans->mac_cfg->gen2)  				tfdidx = idx;  			else  				tfdidx = ptr; @@ -3532,7 +3662,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,  		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);  	/* Paged memory for gen2 HW */ -	if (trans->trans_cfg->gen2 && +	if (trans->mac_cfg->gen2 &&  	    dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {  		for (i = 0; i < trans->init_dram.paging_cnt; i++) {  			struct iwl_fw_error_dump_paging *paging; @@ -3572,7 +3702,7 @@ void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)  	if (trans_pcie->msix_enabled) {  		inta_addr = CSR_MSIX_HW_INT_CAUSES_AD; -		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) +		if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)  			sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;  		else  			sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR; @@ -3584,36 +3714,60 @@ void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)  	iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);  } -struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, -			       const struct pci_device_id *ent, -			       const struct iwl_cfg_trans_params *cfg_trans) +static int iwl_trans_pcie_set_txcmd_info(const struct iwl_mac_cfg *mac_cfg, +					 unsigned int *txcmd_size, +					 unsigned int *txcmd_align) +{ +	if (!mac_cfg->gen2) { +		*txcmd_size = sizeof(struct iwl_tx_cmd_v6); +		*txcmd_align = sizeof(void *); +	} else if (mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) { +		*txcmd_size = sizeof(struct iwl_tx_cmd_v9); +		*txcmd_align = 64; +	} else { +		*txcmd_size = sizeof(struct iwl_tx_cmd); +		*txcmd_align = 128; +	} + +	*txcmd_size += sizeof(struct iwl_cmd_header); +	*txcmd_size += 36; /* biggest possible 802.11 header */ + +	/* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */ +	if (WARN_ON((mac_cfg->gen2 && *txcmd_size >= *txcmd_align))) +		return -EINVAL; + +	return 0; +} + +static struct iwl_trans * +iwl_trans_pcie_alloc(struct pci_dev *pdev, +		     const struct iwl_mac_cfg *mac_cfg, +		     struct iwl_trans_info *info, u8 __iomem *hw_base)  {  	struct iwl_trans_pcie *trans_pcie, **priv; +	unsigned int txcmd_size, txcmd_align;  	struct iwl_trans *trans; +	unsigned int bc_tbl_n_entries;  	int ret, addr_size; -	void __iomem * const *table; -	u32 bar0; - -	/* reassign our BAR 0 if invalid due to possible runtime PM races */ -	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &bar0); -	if (bar0 == PCI_BASE_ADDRESS_MEM_TYPE_64) { -		ret = pci_assign_resource(pdev, 0); -		if (ret) -			return ERR_PTR(ret); -	} -	ret = pcim_enable_device(pdev); +	ret = iwl_trans_pcie_set_txcmd_info(mac_cfg, &txcmd_size, +					    &txcmd_align);  	if (ret)  		return ERR_PTR(ret);  	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, -				cfg_trans); +				mac_cfg, txcmd_size, txcmd_align);  	if (!trans)  		return ERR_PTR(-ENOMEM);  	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -	if (trans->trans_cfg->gen2) { +	trans_pcie->hw_base = hw_base; + +	/* Initialize the wait queue for commands */ +	init_waitqueue_head(&trans_pcie->wait_command_queue); + +	if (trans->mac_cfg->gen2) {  		
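+		/*
+		 * gen2 devices use the extended TFH descriptors, which
+		 * carry up to 64-bit DMA addresses; the legacy TFD
+		 * layout in the else branch is limited to 36-bit
+		 * addresses (the WARN_ON on tfd.addr_size further down
+		 * checks exactly this 64-vs-36 split).
+		 */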
trans_pcie->txqs.tfd.addr_size = 64;  		trans_pcie->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;  		trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfh_tfd); @@ -3622,7 +3776,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,  		trans_pcie->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;  		trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfd);  	} -	trans->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie); + +	trans_pcie->supported_dma_mask = (u32)DMA_BIT_MASK(12); +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) +		trans_pcie->supported_dma_mask = (u32)DMA_BIT_MASK(11); + +	info->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie);  #ifdef CONFIG_INET  	trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page); @@ -3632,20 +3791,21 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,  	}  #endif -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) -		trans_pcie->txqs.bc_tbl_size = -			sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ; -	else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) -		trans_pcie->txqs.bc_tbl_size = -			sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210; +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) +		bc_tbl_n_entries = TFD_QUEUE_BC_SIZE_BZ; +	else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) +		bc_tbl_n_entries = TFD_QUEUE_BC_SIZE_AX210;  	else -		trans_pcie->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl); +		bc_tbl_n_entries = TFD_QUEUE_BC_SIZE; + +	trans_pcie->txqs.bc_tbl_size = +		sizeof(struct iwl_bc_tbl_entry) * bc_tbl_n_entries;  	/*  	 * For gen2 devices, we use a single allocation for each byte-count  	 * table, but they're pretty small (1k) so use a DMA pool that we  	 * allocate here.  	 */ -	if (trans->trans_cfg->gen2) { +	if (trans->mac_cfg->gen2) {  		trans_pcie->txqs.bc_pool =  			dmam_pool_create("iwlwifi:bc", trans->dev,  					 trans_pcie->txqs.bc_tbl_size, @@ -3658,7 +3818,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,  	/* Some things must not change even if the config does */  	WARN_ON(trans_pcie->txqs.tfd.addr_size != -		(trans->trans_cfg->gen2 ? 64 : 36)); +		(trans->mac_cfg->gen2 ? 64 : 36));  	/* Initialize NAPI here - it should be before registering to mac80211  	 * in the opmode but after the HW struct is allocated. @@ -3692,7 +3852,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,  	trans_pcie->debug_rfkill = -1; -	if (!cfg_trans->base_params->pcie_l1_allowed) { +	if (!mac_cfg->base->pcie_l1_allowed) {  		/*  		 * W/A - seems to solve weird behavior. We need to remove this  		 * if we don't want to stay in L1 all the time. 
This wastes a @@ -3703,8 +3863,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,  				       PCIE_LINK_STATE_CLKPM);  	} -	pci_set_master(pdev); -  	addr_size = trans_pcie->txqs.tfd.addr_size;  	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size));  	if (ret) { @@ -3716,29 +3874,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,  		}  	} -	ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME); -	if (ret) { -		dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n"); -		goto out_no_pci; -	} - -#if defined(__FreeBSD__) -	linuxkpi_pcim_want_to_use_bus_functions(pdev); -#endif -	table = pcim_iomap_table(pdev); -	if (!table) { -		dev_err(&pdev->dev, "pcim_iomap_table failed\n"); -		ret = -ENOMEM; -		goto out_no_pci; -	} - -	trans_pcie->hw_base = table[0]; -	if (!trans_pcie->hw_base) { -		dev_err(&pdev->dev, "couldn't find IO mem in first BAR\n"); -		ret = -ENODEV; -		goto out_no_pci; -	} -  	/* We disable the RETRY_TIMEOUT register (0x41) to keep  	 * PCI Tx retries from interfering with C3 CPU state */  	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); @@ -3746,30 +3881,20 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,  	trans_pcie->pci_dev = pdev;  	iwl_disable_interrupts(trans); -	trans->hw_rev = iwl_read32(trans, CSR_HW_REV); -	if (trans->hw_rev == 0xffffffff) { -		dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n"); -		ret = -EIO; -		goto out_no_pci; -	} -  	/*  	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have  	 * changed, and now the revision step also includes bit 0-1 (no more  	 * "dash" value). To keep hw_rev backwards compatible - we'll store it  	 * in the old format.  	 */ -	if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000) -		trans->hw_rev_step = trans->hw_rev & 0xF; +	if (mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000) +		info->hw_rev_step = info->hw_rev & 0xF;  	else -		trans->hw_rev_step = (trans->hw_rev & 0xC) >> 2; +		info->hw_rev_step = (info->hw_rev & 0xC) >> 2; -	IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev); +	IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", info->hw_rev); -	iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans); -	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; -	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), -		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); +	iwl_pcie_set_interrupt_capa(pdev, trans, mac_cfg, info);  	init_waitqueue_head(&trans_pcie->sx_waitq); @@ -3778,7 +3903,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,  		goto out_no_pci;  	if (trans_pcie->msix_enabled) { -		ret = iwl_pcie_init_msix_handler(pdev, trans_pcie); +		ret = iwl_pcie_init_msix_handler(pdev, trans_pcie, info);  		if (ret)  			goto out_no_pci;  	 } else { @@ -3851,9 +3976,332 @@ int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,  				 IMR_D2S_REQUESTED, 5 * HZ);  	if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) {  		IWL_ERR(trans, "Failed to copy IMR Memory chunk!\n"); -		iwl_trans_pcie_dump_regs(trans); +		iwl_trans_pcie_dump_regs(trans, trans_pcie->pci_dev);  		return -ETIMEDOUT;  	}  	trans_pcie->imr_status = IMR_D2S_IDLE;  	return 0;  } + +/* + * Read rf id and cdb info from prph register and store it + */ +static void get_crf_id(struct iwl_trans *iwl_trans, +		       struct iwl_trans_info *info) +{ +	u32 sd_reg_ver_addr; +	u32 hw_wfpm_id; +	u32 val = 0; +	u8 step; + +	if (iwl_trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) +		sd_reg_ver_addr = SD_REG_VER_GEN2; +	else +		
sd_reg_ver_addr = SD_REG_VER;
+
+	/* Enable access to peripheral registers */
+	val = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG);
+	val |= WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK;
+	iwl_write_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG, val);
+
+	/* Read crf info */
+	info->hw_crf_id = iwl_read_prph_no_grab(iwl_trans, sd_reg_ver_addr);
+
+	/* Read cnv info */
+	info->hw_cnv_id = iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP);
+
+	/* For BZ-W, take B step also when A step is indicated */
+	if (CSR_HW_REV_TYPE(info->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W)
+		step = SILICON_B_STEP;
+
+	/* In BZ, the MAC step must be read from the CNVI aux register */
+	if (CSR_HW_REV_TYPE(info->hw_rev) == IWL_CFG_MAC_TYPE_BZ) {
+		step = CNVI_AUX_MISC_CHIP_MAC_STEP(info->hw_cnv_id);
+
+		/* For BZ-U, take B step also when A step is indicated */
+		if ((CNVI_AUX_MISC_CHIP_PROD_TYPE(info->hw_cnv_id) ==
+		    CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U) &&
+		    step == SILICON_A_STEP)
+			step = SILICON_B_STEP;
+	}
+
+	if (CSR_HW_REV_TYPE(info->hw_rev) == IWL_CFG_MAC_TYPE_BZ ||
+	    CSR_HW_REV_TYPE(info->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W) {
+		info->hw_rev_step = step;
+		info->hw_rev |= step;
+	}
+
+	/* Read cdb info (also contains the jacket info, if needed in the future) */
+	hw_wfpm_id = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR);
+	IWL_INFO(iwl_trans, "Detected crf-id 0x%x, cnv-id 0x%x wfpm id 0x%x\n",
+		 info->hw_crf_id, info->hw_cnv_id, hw_wfpm_id);
+}
+
+/*
+ * In case there is no OTP on the NIC, map the rf id and cdb info
+ * from the prph registers.
+ */
+static int map_crf_id(struct iwl_trans *iwl_trans,
+		      struct iwl_trans_info *info)
+{
+	int ret = 0;
+	u32 val = info->hw_crf_id;
+	u32 step_id = REG_CRF_ID_STEP(val);
+	u32 slave_id = REG_CRF_ID_SLAVE(val);
+	u32 jacket_id_cnv = REG_CRF_ID_SLAVE(info->hw_cnv_id);
+	u32 hw_wfpm_id = iwl_read_umac_prph_no_grab(iwl_trans,
+						    WFPM_OTP_CFG1_ADDR);
+	u32 jacket_id_wfpm = WFPM_OTP_CFG1_IS_JACKET(hw_wfpm_id);
+	u32 cdb_id_wfpm = WFPM_OTP_CFG1_IS_CDB(hw_wfpm_id);
+
+	/* Map the crf id to an rf id */
+	switch (REG_CRF_ID_TYPE(val)) {
+	case REG_CRF_ID_TYPE_JF_1:
+		info->hw_rf_id = (IWL_CFG_RF_TYPE_JF1 << 12);
+		break;
+	case REG_CRF_ID_TYPE_JF_2:
+		info->hw_rf_id = (IWL_CFG_RF_TYPE_JF2 << 12);
+		break;
+	case REG_CRF_ID_TYPE_HR_NONE_CDB_1X1:
+		info->hw_rf_id = (IWL_CFG_RF_TYPE_HR1 << 12);
+		break;
+	case REG_CRF_ID_TYPE_HR_NONE_CDB:
+		info->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12);
+		break;
+	case REG_CRF_ID_TYPE_HR_CDB:
+		info->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12);
+		break;
+	case REG_CRF_ID_TYPE_GF:
+		info->hw_rf_id = (IWL_CFG_RF_TYPE_GF << 12);
+		break;
+	case REG_CRF_ID_TYPE_FM:
+		info->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12);
+		break;
+	case REG_CRF_ID_TYPE_WHP:
+		info->hw_rf_id = (IWL_CFG_RF_TYPE_WH << 12);
+		break;
+	case REG_CRF_ID_TYPE_PE:
+		info->hw_rf_id = (IWL_CFG_RF_TYPE_PE << 12);
+		break;
+	default:
+		ret = -EIO;
+		IWL_ERR(iwl_trans,
+			"Can't find a correct rfid for crf id 0x%x\n",
+			REG_CRF_ID_TYPE(val));
+		goto out;
+	}
+
+	/* Set Step-id */
+	info->hw_rf_id |= (step_id << 8);
+
+	/* Set CDB capabilities */
+	if (cdb_id_wfpm || slave_id) {
+		info->hw_rf_id += BIT(28);
+		IWL_INFO(iwl_trans, "Adding cdb to rf id\n");
+	}
+
+	/* Set Jacket capabilities */
+	if (jacket_id_wfpm || jacket_id_cnv) {
+		info->hw_rf_id += BIT(29);
+		IWL_INFO(iwl_trans, "Adding jacket to rf id\n");
+	}
+
+	IWL_INFO(iwl_trans,
+		 "Detected rf-type 0x%x step-id 0x%x slave-id 0x%x from crf id 0x%x\n",
+		 
REG_CRF_ID_TYPE(val), step_id, slave_id, info->hw_rf_id); +	IWL_INFO(iwl_trans, +		 "Detected cdb-id 0x%x jacket-id 0x%x from wfpm id 0x%x\n", +		 cdb_id_wfpm, jacket_id_wfpm, hw_wfpm_id); +	IWL_INFO(iwl_trans, "Detected jacket-id 0x%x from cnvi id 0x%x\n", +		 jacket_id_cnv, info->hw_cnv_id); + +out: +	return ret; +} + +static void iwl_pcie_recheck_me_status(struct work_struct *wk) +{ +	struct iwl_trans_pcie *trans_pcie = container_of(wk, +							 typeof(*trans_pcie), +							 me_recheck_wk.work); +	u32 val; + +	val = iwl_read32(trans_pcie->trans, CSR_HW_IF_CONFIG_REG); +	trans_pcie->me_present = !!(val & CSR_HW_IF_CONFIG_REG_IAMT_UP); +} + +static void iwl_pcie_check_me_status(struct iwl_trans *trans) +{ +	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); +	u32 val; + +	trans_pcie->me_present = -1; + +	INIT_DELAYED_WORK(&trans_pcie->me_recheck_wk, +			  iwl_pcie_recheck_me_status); + +	/* we don't have a good way of determining this until BZ */ +	if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_BZ) +		return; + +	val = iwl_read_prph(trans, CNVI_SCU_REG_FOR_ECO_1); +	if (val & CNVI_SCU_REG_FOR_ECO_1_WIAMT_KNOWN) { +		trans_pcie->me_present = +			!!(val & CNVI_SCU_REG_FOR_ECO_1_WIAMT_PRESENT); +		return; +	} + +	val = iwl_read32(trans, CSR_HW_IF_CONFIG_REG); +	if (val & (CSR_HW_IF_CONFIG_REG_ME_OWN | +		   CSR_HW_IF_CONFIG_REG_IAMT_UP)) { +		trans_pcie->me_present = 1; +		return; +	} + +	/* recheck again later, ME might still be initializing */ +	schedule_delayed_work(&trans_pcie->me_recheck_wk, HZ); +} + +int iwl_pci_gen1_2_probe(struct pci_dev *pdev, +			 const struct pci_device_id *ent, +			 const struct iwl_mac_cfg *mac_cfg, +			 u8 __iomem *hw_base, u32 hw_rev) +{ +	const struct iwl_dev_info *dev_info; +	struct iwl_trans_info info = { +		.hw_id = (pdev->device << 16) + pdev->subsystem_device, +		.hw_rev = hw_rev, +	}; +	struct iwl_trans *iwl_trans; +	struct iwl_trans_pcie *trans_pcie; +	int ret; + +	iwl_trans = iwl_trans_pcie_alloc(pdev, mac_cfg, &info, hw_base); +	if (IS_ERR(iwl_trans)) +		return PTR_ERR(iwl_trans); + +	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans); + +	iwl_trans_pcie_check_product_reset_status(pdev); +	iwl_trans_pcie_check_product_reset_mode(pdev); + +	/* set the things we know so far for the grab NIC access */ +	iwl_trans_set_info(iwl_trans, &info); + +	/* +	 * Let's try to grab NIC access early here. Sometimes, NICs may +	 * fail to initialize, and if that happens it's better if we see +	 * issues early on (and can reprobe, per the logic inside), than +	 * first trying to load the firmware etc. and potentially only +	 * detecting any problems when the first interface is brought up. +	 */ +	ret = iwl_pcie_prepare_card_hw(iwl_trans); +	if (!ret) { +		ret = iwl_finish_nic_init(iwl_trans); +		if (ret) +			goto out_free_trans; +		if (iwl_trans_grab_nic_access(iwl_trans)) { +			get_crf_id(iwl_trans, &info); +			/* all good */ +			iwl_trans_release_nic_access(iwl_trans); +		} else { +			ret = -EIO; +			goto out_free_trans; +		} +	} + +	info.hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID); + +	/* +	 * The RF_ID is set to zero in blank OTP so read version to +	 * extract the RF_ID. +	 * This is relevant only for family 9000 and up. 
+	 */ +	if (iwl_trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_9000 && +	    !CSR_HW_RFID_TYPE(info.hw_rf_id) && map_crf_id(iwl_trans, &info)) { +		ret = -EINVAL; +		goto out_free_trans; +	} + +	IWL_INFO(iwl_trans, "PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n", +		 pdev->device, pdev->subsystem_device, +		 info.hw_rev, info.hw_rf_id); + +	dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device, +					 CSR_HW_RFID_TYPE(info.hw_rf_id), +					 CSR_HW_RFID_IS_CDB(info.hw_rf_id), +					 IWL_SUBDEVICE_RF_ID(pdev->subsystem_device), +					 IWL_SUBDEVICE_BW_LIM(pdev->subsystem_device), +					 !iwl_trans->mac_cfg->integrated); +	if (dev_info) { +		iwl_trans->cfg = dev_info->cfg; +		info.name = dev_info->name; +	} + +#if IS_ENABLED(CONFIG_IWLMVM) + +	/* +	 * special-case 7265D, it has the same PCI IDs. +	 * +	 * Note that because we already pass the cfg to the transport above, +	 * all the parameters that the transport uses must, until that is +	 * changed, be identical to the ones in the 7265D configuration. +	 */ +	if (iwl_trans->cfg == &iwl7265_cfg && +	    (info.hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) +		iwl_trans->cfg = &iwl7265d_cfg; +#endif +	if (!iwl_trans->cfg) { +		pr_err("No config found for PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n", +		       pdev->device, pdev->subsystem_device, +		       info.hw_rev, info.hw_rf_id); +		ret = -EINVAL; +		goto out_free_trans; +	} + +	IWL_INFO(iwl_trans, "Detected %s\n", info.name); + +	if (iwl_trans->mac_cfg->mq_rx_supported) { +		if (WARN_ON(!iwl_trans->cfg->num_rbds)) { +			ret = -EINVAL; +			goto out_free_trans; +		} +		trans_pcie->num_rx_bufs = iwl_trans_get_num_rbds(iwl_trans); +	} else { +		trans_pcie->num_rx_bufs = RX_QUEUE_SIZE; +	} + +	if (!iwl_trans->mac_cfg->integrated) { +		u16 link_status; + +		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status); + +		info.pcie_link_speed = +			u16_get_bits(link_status, PCI_EXP_LNKSTA_CLS); +	} + +	iwl_trans_set_info(iwl_trans, &info); + +	pci_set_drvdata(pdev, iwl_trans); + +	iwl_pcie_check_me_status(iwl_trans); + +	/* try to get ownership so that we'll know if we don't own it */ +	iwl_pcie_prepare_card_hw(iwl_trans); + +	iwl_trans->drv = iwl_drv_start(iwl_trans); + +	if (IS_ERR(iwl_trans->drv)) { +		ret = PTR_ERR(iwl_trans->drv); +		goto out_free_trans; +	} + +	/* register transport layer debugfs here */ +	iwl_trans_pcie_dbgfs_register(iwl_trans); + +	return 0; + +out_free_trans: +	iwl_trans_pcie_free(iwl_trans); +	return ret; +} diff --git a/sys/contrib/dev/iwlwifi/pcie/tx-gen2.c b/sys/contrib/dev/iwlwifi/pcie/gen1_2/tx-gen2.c index 72be71184f22..4163d6518ec6 100644 --- a/sys/contrib/dev/iwlwifi/pcie/tx-gen2.c +++ b/sys/contrib/dev/iwlwifi/pcie/gen1_2/tx-gen2.c @@ -1,7 +1,7 @@  // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause  /*   * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020, 2023-2024 Intel Corporation + * Copyright (C) 2018-2020, 2023-2025 Intel Corporation   */  #ifdef CONFIG_INET  #include <net/tso.h> @@ -20,13 +20,12 @@  static struct page *get_workaround_page(struct iwl_trans *trans,  					struct sk_buff *skb)  { -	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	struct iwl_tso_page_info *info;  	struct page **page_ptr;  	struct page *ret;  	dma_addr_t phys; -	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs); +	page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);  	ret = alloc_page(GFP_ATOMIC);  	if (!ret) @@ -166,7 +165,7 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans 
*trans,  				    struct iwl_device_tx_cmd *dev_cmd)  {  #ifdef CONFIG_INET -	struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; +	struct iwl_tx_cmd_v9 *tx_cmd = (void *)dev_cmd->payload;  	struct ieee80211_hdr *hdr = (void *)skb->data;  	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;  	unsigned int mss = skb_shinfo(skb)->gso_size; @@ -190,7 +189,8 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,  		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));  	/* Our device supports 9 segments at most, it will fit in 1 page */ -	sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room); +	sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room, +				snap_ip_tcp_hdrlen + hdr_len);  	if (!sgt)  		return -ENOMEM; @@ -349,6 +349,7 @@ iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,  	return tfd;  out_err: +	iwl_pcie_free_tso_pages(trans, skb, out_meta);  	iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);  	return NULL;  } @@ -491,21 +492,21 @@ struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,  	bool amsdu;  	/* There must be data left over for TB1 or this code must be changed */ -	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE); +	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_v9) < IWL_FIRST_TB_SIZE);  	BUILD_BUG_ON(sizeof(struct iwl_cmd_header) + -		     offsetofend(struct iwl_tx_cmd_gen2, dram_info) > +		     offsetofend(struct iwl_tx_cmd_v9, dram_info) >  		     IWL_FIRST_TB_SIZE); -	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE); +	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);  	BUILD_BUG_ON(sizeof(struct iwl_cmd_header) + -		     offsetofend(struct iwl_tx_cmd_gen3, dram_info) > +		     offsetofend(struct iwl_tx_cmd, dram_info) >  		     IWL_FIRST_TB_SIZE);  	memset(tfd, 0, sizeof(*tfd)); -	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) -		len = sizeof(struct iwl_tx_cmd_gen2); +	if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) +		len = sizeof(struct iwl_tx_cmd_v9);  	else -		len = sizeof(struct iwl_tx_cmd_gen3); +		len = sizeof(struct iwl_tx_cmd);  	amsdu = ieee80211_is_data_qos(hdr->frame_control) &&  			(*ieee80211_get_qos_ctl(hdr) & @@ -536,17 +537,17 @@ int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)  	 * If q->n_window is smaller than max_tfd_queue_size, there is no need  	 * to reserve any queue entries for this purpose.  	 */ -	if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size) +	if (q->n_window < trans->mac_cfg->base->max_tfd_queue_size)  		max = q->n_window;  	else -		max = trans->trans_cfg->base_params->max_tfd_queue_size - 1; +		max = trans->mac_cfg->base->max_tfd_queue_size - 1;  	/*  	 * max_tfd_queue_size is a power of 2, so the following is equivalent to  	 * modulo by max_tfd_queue_size and is well defined.  	 
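	 * For example (illustrative numbers only): with a queue size of
	 * 256, write_ptr == 3 and read_ptr == 250 give
	 * (3 - 250) & 255 == 9, which is exactly the nine entries in
	 * use across the wrap.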
*/  	used = (q->write_ptr - q->read_ptr) & -		(trans->trans_cfg->base_params->max_tfd_queue_size - 1); +		(trans->mac_cfg->base->max_tfd_queue_size - 1);  	if (WARN_ON(used > max))  		return 0; @@ -561,8 +562,8 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,  					  struct iwl_txq *txq, u16 byte_cnt,  					  int num_tbs)  { -	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); +	struct iwl_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.addr;  	u8 filled_tfd_size, num_fetch_chunks;  	u16 len = byte_cnt;  	__le16 bc_ent; @@ -582,24 +583,16 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,  	 */  	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { -		struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr; - -		/* Starting from AX210, the HW expects bytes */ -		WARN_ON(trans_pcie->txqs.bc_table_dword); +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {  		WARN_ON(len > 0x3FFF);  		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14)); -		scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;  	} else { -		struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; - -		/* Before AX210, the HW expects DW */ -		WARN_ON(!trans_pcie->txqs.bc_table_dword);  		len = DIV_ROUND_UP(len, 4);  		WARN_ON(len > 0xFFF);  		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); -		scd_bc_tbl->tfd_offset[idx] = bc_ent;  	} + +	scd_bc_tbl[idx].tfd_offset = bc_ent;  }  static u8 iwl_txq_gen2_get_num_tbs(struct iwl_tfh_tfd *tfd) @@ -756,7 +749,8 @@ int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,  			struct iwl_device_tx_cmd **dev_cmd_ptr;  			dev_cmd_ptr = (void *)((u8 *)skb->cb + -					       trans_pcie->txqs.dev_cmd_offs); +					       trans->conf.cb_data_offs + +					       sizeof(void *));  			*dev_cmd_ptr = dev_cmd;  			__skb_queue_tail(&txq->overflow_q, skb); @@ -785,16 +779,16 @@ int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,  		return -1;  	} -	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { -		struct iwl_tx_cmd_gen3 *tx_cmd_gen3 = +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { +		struct iwl_tx_cmd *tx_cmd =  			(void *)dev_cmd->payload; -		cmd_len = le16_to_cpu(tx_cmd_gen3->len); +		cmd_len = le16_to_cpu(tx_cmd->len);  	} else { -		struct iwl_tx_cmd_gen2 *tx_cmd_gen2 = +		struct iwl_tx_cmd_v9 *tx_cmd_v9 =  			(void *)dev_cmd->payload; -		cmd_len = le16_to_cpu(tx_cmd_gen2->len); +		cmd_len = le16_to_cpu(tx_cmd_v9->len);  	}  	/* Set up entry for this TFD in Tx byte-count array */ @@ -832,7 +826,7 @@ static void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)  		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",  				   txq_id, txq->read_ptr); -		if (txq_id != trans_pcie->txqs.cmd.q_id) { +		if (txq_id != trans->conf.cmd_queue) {  			int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);  			struct iwl_cmd_meta *cmd_meta = &txq->entries[idx].meta;  			struct sk_buff *skb = txq->entries[idx].skb; @@ -906,12 +900,12 @@ static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)  	iwl_txq_gen2_unmap(trans, txq_id);  	/* De-alloc array of command/tx buffers */ -	if (txq_id == trans_pcie->txqs.cmd.q_id) +	if (txq_id == trans->conf.cmd_queue)  		for (i = 0; i < txq->n_window; i++) {  			kfree_sensitive(txq->entries[i].cmd);  			kfree_sensitive(txq->entries[i].free_buf);  		} -	del_timer_sync(&txq->stuck_timer); +	
timer_delete_sync(&txq->stuck_timer);  	iwl_txq_gen2_free_memory(trans, txq); @@ -1007,7 +1001,7 @@ static int iwl_pcie_txq_alloc_response(struct iwl_trans *trans,  	txq->id = qid;  	trans_pcie->txqs.txq[qid] = txq; -	wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1); +	wr_ptr &= (trans->mac_cfg->base->max_tfd_queue_size - 1);  	/* Place first TFD at index corresponding to start sequence number */  	txq->read_ptr = wr_ptr; @@ -1043,8 +1037,8 @@ int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,  	/* but must be power of 2 values for calculating read/write pointers */  	size = rounddown_pow_of_two(size); -	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ && -	    trans->hw_rev_step == SILICON_A_STEP) { +	if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_BZ && +	    trans->info.hw_rev_step == SILICON_A_STEP) {  		size = 4096;  		txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);  	} else { @@ -1064,7 +1058,7 @@ int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,  	if (IS_ERR(txq))  		return PTR_ERR(txq); -	if (trans_pcie->txqs.queue_alloc_cmd_ver == 0) { +	if (trans->conf.queue_alloc_cmd_ver == 0) {  		memset(&cmd.old, 0, sizeof(cmd.old));  		cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);  		cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma); @@ -1081,7 +1075,7 @@ int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,  		hcmd.id = SCD_QUEUE_CFG;  		hcmd.len[0] = sizeof(cmd.old);  		hcmd.data[0] = &cmd.old; -	} else if (trans_pcie->txqs.queue_alloc_cmd_ver == 3) { +	} else if (trans->conf.queue_alloc_cmd_ver == 3) {  		memset(&cmd.new, 0, sizeof(cmd.new));  		cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);  		cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr); @@ -1176,7 +1170,7 @@ int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)  	}  	ret = iwl_txq_init(trans, queue, queue_size, -			   (txq_id == trans_pcie->txqs.cmd.q_id)); +			   (txq_id == trans->conf.cmd_queue));  	if (ret) {  		IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);  		goto error; @@ -1206,7 +1200,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,  			       struct iwl_host_cmd *cmd)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -	struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; +	struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];  	struct iwl_device_cmd *out_cmd;  	struct iwl_cmd_meta *out_meta;  	void *dup_buf = NULL; @@ -1300,7 +1294,9 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,  		spin_unlock_irqrestore(&txq->lock, flags);  		IWL_ERR(trans, "No space in command queue\n"); -		iwl_op_mode_cmd_queue_full(trans->op_mode); +		iwl_op_mode_nic_error(trans->op_mode, +				      IWL_ERR_TYPE_CMD_QUEUE_FULL); +		iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_CMD_QUEUE_FULL);  		idx = -ENOSPC;  		goto free_dup_buf;  	} @@ -1321,7 +1317,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,  		cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));  	out_cmd->hdr_wide.reserved = 0;  	out_cmd->hdr_wide.sequence = -		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) | +		cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |  					 INDEX_TO_SEQ(txq->write_ptr));  	cmd_pos = sizeof(struct iwl_cmd_header_wide); @@ -1369,7 +1365,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,  		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",  		     iwl_get_cmd_string(trans, cmd->id), 
group_id,  		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), -		     cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id); +		     cmd_size, txq->write_ptr, idx, trans->conf.cmd_queue);  	/* start the TFD with the minimum copy bytes */  	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); diff --git a/sys/contrib/dev/iwlwifi/pcie/tx.c b/sys/contrib/dev/iwlwifi/pcie/gen1_2/tx.c index e2b8165aead6..5d62d19ad3cf 100644 --- a/sys/contrib/dev/iwlwifi/pcie/tx.c +++ b/sys/contrib/dev/iwlwifi/pcie/gen1_2/tx.c @@ -1,6 +1,6 @@  // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause  /* - * Copyright (C) 2003-2014, 2018-2021, 2023-2024 Intel Corporation + * Copyright (C) 2003-2014, 2018-2021, 2023-2025 Intel Corporation   * Copyright (C) 2013-2015 Intel Mobile Communications GmbH   * Copyright (C) 2016-2017 Intel Deutschland GmbH   */ @@ -30,6 +30,8 @@  #include "iwl-op-mode.h"  #include "internal.h"  #include "fw/api/tx.h" +#include "fw/dbg.h" +#include "pcie/utils.h"  /*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****   * DMA services @@ -83,7 +85,6 @@ void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)  static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,  				    struct iwl_txq *txq)  { -	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	u32 reg = 0;  	int txq_id = txq->id; @@ -95,8 +96,8 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,  	 * 2. NIC is woken up for CMD regardless of shadow outside this function  	 * 3. there is a chance that the NIC is asleep  	 */ -	if (!trans->trans_cfg->base_params->shadow_reg_enable && -	    txq_id != trans_pcie->txqs.cmd.q_id && +	if (!trans->mac_cfg->base->shadow_reg_enable && +	    txq_id != trans->conf.cmd_queue &&  	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {  		/*  		 * wake up nic if it's powered down ... 
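The hunk above is the Tx write-pointer path: when shadow registers are not enabled and the NIC may be in power save (STATUS_TPOWER_PMI), the driver cannot simply write the TFD write pointer; it first has to wake the MAC, except on the command queue. A minimal self-contained sketch of that wake-then-write pattern follows; every name in it (toy_nic, toy_write_wr_ptr and so on) is an illustrative stand-in, not driver API:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct toy_nic {
		bool shadow_reg_enable;	/* shadow regs absorb writes during sleep */
		bool maybe_asleep;	/* power-save state is not known */
		uint32_t wr_ptr;	/* stands in for the MMIO register */
	};

	static void toy_write_wr_ptr(struct toy_nic *nic, uint32_t val)
	{
		nic->wr_ptr = val;	/* pretend MMIO write */
		printf("WR ptr <- %u\n", val);
	}

	static void toy_inc_wr_ptr(struct toy_nic *nic, uint32_t val)
	{
		if (!nic->shadow_reg_enable && nic->maybe_asleep) {
			/* set a wake bit instead; the wake-up interrupt
			 * handler performs the deferred write */
			printf("waking NIC, deferring WR ptr %u\n", val);
			return;
		}
		toy_write_wr_ptr(nic, val);
	}

	int main(void)
	{
		struct toy_nic nic = { .shadow_reg_enable = true };

		toy_inc_wr_ptr(&nic, 42);	/* lands immediately */

		nic.shadow_reg_enable = false;
		nic.maybe_asleep = true;
		toy_inc_wr_ptr(&nic, 43);	/* deferred until wake-up */
		return 0;
	}
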
@@ -130,7 +131,7 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	int i; -	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { +	for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) {  		struct iwl_txq *txq = trans_pcie->txqs.txq[i];  		if (!test_bit(i, trans_pcie->txqs.queue_used)) @@ -198,7 +199,7 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -	if (!trans->trans_cfg->base_params->apmg_wake_up_wa) +	if (!trans->mac_cfg->base->apmg_wake_up_wa)  		return;  	spin_lock(&trans_pcie->reg_lock); @@ -209,8 +210,8 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)  	}  	trans_pcie->cmd_hold_nic_awake = false; -	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, -				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); +	iwl_trans_clear_bit(trans, CSR_GP_CNTRL, +			    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);  	spin_unlock(&trans_pcie->reg_lock);  } @@ -231,11 +232,10 @@ static void iwl_pcie_free_and_unmap_tso_page(struct iwl_trans *trans,  void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,  			     struct iwl_cmd_meta *cmd_meta)  { -	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	struct page **page_ptr;  	struct page *next; -	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs); +	page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);  	next = *page_ptr;  	*page_ptr = NULL; @@ -285,10 +285,12 @@ iwl_txq_gen1_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)  static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,  					 struct iwl_tfd *tfd)  { +	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); +  	tfd->num_tbs = 0; -	iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans->invalid_tx_cmd.dma, -				 trans->invalid_tx_cmd.size); +	iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans_pcie->invalid_tx_cmd.dma, +				 trans_pcie->invalid_tx_cmd.size);  }  static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, @@ -360,7 +362,7 @@ static void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,  	/* We have only q->n_window txq->entries, but we use  	 * TFD_QUEUE_SIZE_MAX tfds  	 */ -	if (trans->trans_cfg->gen2) +	if (trans->mac_cfg->gen2)  		iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,  				       iwl_txq_get_tfd(trans, txq, read_ptr));  	else @@ -399,7 +401,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)  		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",  				   txq_id, txq->read_ptr); -		if (txq_id != trans_pcie->txqs.cmd.q_id) { +		if (txq_id != trans->conf.cmd_queue) {  			struct sk_buff *skb = txq->entries[txq->read_ptr].skb;  			struct iwl_cmd_meta *cmd_meta =  				&txq->entries[txq->read_ptr].meta; @@ -413,7 +415,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)  		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);  		if (txq->read_ptr == txq->write_ptr && -		    txq_id == trans_pcie->txqs.cmd.q_id) +		    txq_id == trans->conf.cmd_queue)  			iwl_pcie_clear_cmd_in_flight(trans);  	} @@ -451,7 +453,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)  	iwl_pcie_txq_unmap(trans, txq_id);  	/* De-alloc array of command/tx buffers */ -	if (txq_id == trans_pcie->txqs.cmd.q_id) +	if (txq_id == trans->conf.cmd_queue)  		for (i = 0; i < txq->n_window; i++) {  			kfree_sensitive(txq->entries[i].cmd);  			kfree_sensitive(txq->entries[i].free_buf); @@ 
-461,7 +463,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)  	if (txq->tfds) {  		dma_free_coherent(dev,  				  trans_pcie->txqs.tfd.size * -				  trans->trans_cfg->base_params->max_tfd_queue_size, +				  trans->mac_cfg->base->max_tfd_queue_size,  				  txq->tfds, txq->dma_addr);  		txq->dma_addr = 0;  		txq->tfds = NULL; @@ -474,16 +476,16 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)  	kfree(txq->entries);  	txq->entries = NULL; -	del_timer_sync(&txq->stuck_timer); +	timer_delete_sync(&txq->stuck_timer);  	/* 0-fill queue descriptor structure */  	memset(txq, 0, sizeof(*txq));  } -void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) +void iwl_pcie_tx_start(struct iwl_trans *trans)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -	int nq = trans->trans_cfg->base_params->num_of_queues; +	int nq = trans->mac_cfg->base->num_of_queues;  	int chan;  	u32 reg_val;  	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) - @@ -498,9 +500,6 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)  	trans_pcie->scd_base_addr =  		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); -	WARN_ON(scd_base_addr != 0 && -		scd_base_addr != trans_pcie->scd_base_addr); -  	/* reset context data, TX status and translation data */  	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +  				   SCD_CONTEXT_MEM_LOWER_BOUND, @@ -512,12 +511,12 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)  	/* The chain extension of the SCD doesn't work well. This feature is  	 * enabled by default by the HW, so we need to disable it manually.  	 */ -	if (trans->trans_cfg->base_params->scd_chain_ext_wa) +	if (trans->mac_cfg->base->scd_chain_ext_wa)  		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); -	iwl_trans_ac_txq_enable(trans, trans_pcie->txqs.cmd.q_id, -				trans_pcie->txqs.cmd.fifo, -				trans_pcie->txqs.cmd.wdg_timeout); +	iwl_trans_ac_txq_enable(trans, trans->conf.cmd_queue, +				trans->conf.cmd_fifo, +				IWL_DEF_WD_TIMEOUT);  	/* Activate all Tx DMA/FIFO channels */  	iwl_scd_activate_fifos(trans); @@ -534,7 +533,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)  			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);  	/* Enable L1-Active */ -	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) +	if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000)  		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,  				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);  } @@ -548,13 +547,13 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)  	 * we should never get here in gen2 trans mode return early to avoid  	 * having invalid accesses  	 */ -	if (WARN_ON_ONCE(trans->trans_cfg->gen2)) +	if (WARN_ON_ONCE(trans->mac_cfg->gen2))  		return; -	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; +	for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;  	     txq_id++) {  		struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; -		if (trans->trans_cfg->gen2) +		if (trans->mac_cfg->gen2)  			iwl_write_direct64(trans,  					   FH_MEM_CBBC_QUEUE(trans, txq_id),  					   txq->dma_addr); @@ -576,7 +575,7 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)  	 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will  	 * contain garbage.  	 
*/ -	iwl_pcie_tx_start(trans, 0); +	iwl_pcie_tx_start(trans);  }  static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans) @@ -597,8 +596,8 @@ static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)  	}  	/* Wait for DMA channels to be idle */ -	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000); -	if (ret < 0) +	ret = iwl_poll_bits(trans, FH_TSSR_TX_STATUS_REG, mask, 5000); +	if (ret)  		IWL_ERR(trans,  			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",  			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG)); @@ -638,7 +637,7 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)  		return 0;  	/* Unmap DMA from host system and free skb's */ -	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; +	for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;  	     txq_id++)  		iwl_pcie_txq_unmap(trans, txq_id); @@ -661,7 +660,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)  	/* Tx queues */  	if (trans_pcie->txq_memory) {  		for (txq_id = 0; -		     txq_id < trans->trans_cfg->base_params->num_of_queues; +		     txq_id < trans->mac_cfg->base->num_of_queues;  		     txq_id++) {  			iwl_pcie_txq_free(trans, txq_id);  			trans_pcie->txqs.txq[txq_id] = NULL; @@ -683,7 +682,7 @@ void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)  	bool active;  	u8 fifo; -	if (trans->trans_cfg->gen2) { +	if (trans->mac_cfg->gen2) {  		IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,  			txq->read_ptr, txq->write_ptr);  		/* TODO: access new SCD registers and dump them */ @@ -700,15 +699,15 @@ void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)  		jiffies_to_msecs(txq->wd_timeout),  		txq->read_ptr, txq->write_ptr,  		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & -			(trans->trans_cfg->base_params->max_tfd_queue_size - 1), +			(trans->mac_cfg->base->max_tfd_queue_size - 1),  			iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & -			(trans->trans_cfg->base_params->max_tfd_queue_size - 1), +			(trans->mac_cfg->base->max_tfd_queue_size - 1),  			iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));  }  static void iwl_txq_stuck_timer(struct timer_list *t)  { -	struct iwl_txq *txq = from_timer(txq, t, stuck_timer); +	struct iwl_txq *txq = timer_container_of(txq, t, stuck_timer);  	struct iwl_trans *trans = txq->trans;  	spin_lock(&txq->lock); @@ -728,8 +727,8 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,  		       int slots_num, bool cmd_queue)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -	size_t num_entries = trans->trans_cfg->gen2 ? -		slots_num : trans->trans_cfg->base_params->max_tfd_queue_size; +	size_t num_entries = trans->mac_cfg->gen2 ? 
+		slots_num : trans->mac_cfg->base->max_tfd_queue_size;  	size_t tfd_sz;  	size_t tb0_buf_sz;  	int i; @@ -784,7 +783,7 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,  	for (i = 0; i < num_entries; i++) {  		void *tfd = iwl_txq_get_tfd(trans, txq, i); -		if (trans->trans_cfg->gen2) +		if (trans->mac_cfg->gen2)  			iwl_txq_set_tfd_invalid_gen2(trans, tfd);  		else  			iwl_txq_set_tfd_invalid_gen1(trans, tfd); @@ -804,6 +803,8 @@ error:  	return -ENOMEM;  } +#define BC_TABLE_SIZE	(sizeof(struct iwl_bc_tbl_entry) * TFD_QUEUE_BC_SIZE) +  /*   * iwl_pcie_tx_alloc - allocate TX context   * Allocate all Tx DMA structures and initialize them @@ -813,12 +814,12 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)  	int ret;  	int txq_id, slots_num;  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -	u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues; +	u16 bc_tbls_size = trans->mac_cfg->base->num_of_queues; -	if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)) +	if (WARN_ON(trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))  		return -EINVAL; -	bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl); +	bc_tbls_size *= BC_TABLE_SIZE;  	/*It is not allowed to alloc twice, so warn when this happens.  	 * We cannot rely on the previous allocation, so free and fail */ @@ -842,7 +843,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)  	}  	trans_pcie->txq_memory = -		kcalloc(trans->trans_cfg->base_params->num_of_queues, +		kcalloc(trans->mac_cfg->base->num_of_queues,  			sizeof(struct iwl_txq), GFP_KERNEL);  	if (!trans_pcie->txq_memory) {  		IWL_ERR(trans, "Not enough memory for txq\n"); @@ -851,16 +852,16 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)  	}  	/* Alloc and init all Tx queues, including the command queue (#4/#9) */ -	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; +	for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;  	     txq_id++) { -		bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id); +		bool cmd_queue = (txq_id == trans->conf.cmd_queue);  		if (cmd_queue)  			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, -					  trans->cfg->min_txq_size); +					  trans->mac_cfg->base->min_txq_size);  		else  			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, -					  trans->cfg->min_ba_txq_size); +					  trans->mac_cfg->base->min_ba_txq_size);  		trans_pcie->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];  		ret = iwl_pcie_txq_alloc(trans, trans_pcie->txqs.txq[txq_id],  					 slots_num, cmd_queue); @@ -910,7 +911,7 @@ int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,  		 int slots_num, bool cmd_queue)  {  	u32 tfd_queue_max_size = -		trans->trans_cfg->base_params->max_tfd_queue_size; +		trans->mac_cfg->base->max_tfd_queue_size;  	int ret;  	txq->need_update = false; @@ -970,16 +971,16 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)  	spin_unlock_bh(&trans_pcie->irq_lock);  	/* Alloc and init all Tx queues, including the command queue (#4/#9) */ -	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; +	for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;  	     txq_id++) { -		bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id); +		bool cmd_queue = (txq_id == trans->conf.cmd_queue);  		if (cmd_queue)  			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, -					  trans->cfg->min_txq_size); +					  trans->mac_cfg->base->min_txq_size);  		else  			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, -					  
trans->cfg->min_ba_txq_size); +					  trans->mac_cfg->base->min_ba_txq_size);  		ret = iwl_txq_init(trans, trans_pcie->txqs.txq[txq_id], slots_num,  				   cmd_queue);  		if (ret) { @@ -998,7 +999,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)  	}  	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); -	if (trans->trans_cfg->base_params->num_of_queues > 20) +	if (trans->mac_cfg->base->num_of_queues > 20)  		iwl_set_bits_prph(trans, SCD_GP_CTRL,  				  SCD_GP_CTRL_ENABLE_31_QUEUES); @@ -1019,7 +1020,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,  	if (test_bit(STATUS_TRANS_DEAD, &trans->status))  		return -ENODEV; -	if (!trans->trans_cfg->base_params->apmg_wake_up_wa) +	if (!trans->mac_cfg->base->apmg_wake_up_wa)  		return 0;  	/* @@ -1028,7 +1029,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,  	 * returned. This needs to be done only on NICs that have  	 * apmg_wake_up_wa set (see above.)  	 */ -	if (!_iwl_trans_pcie_grab_nic_access(trans)) +	if (!_iwl_trans_pcie_grab_nic_access(trans, false))  		return -EIO;  	/* @@ -1061,7 +1062,7 @@ static void iwl_txq_progress(struct iwl_txq *txq)  	 * since we're making progress on this queue  	 */  	if (txq->read_ptr == txq->write_ptr) -		del_timer(&txq->stuck_timer); +		timer_delete(&txq->stuck_timer);  	else  		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);  } @@ -1097,12 +1098,12 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)  	idx = iwl_txq_get_cmd_index(txq, idx);  	r = iwl_txq_get_cmd_index(txq, txq->read_ptr); -	if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size || +	if (idx >= trans->mac_cfg->base->max_tfd_queue_size ||  	    (!iwl_txq_used(txq, idx, txq->read_ptr, txq->write_ptr))) {  		WARN_ONCE(test_bit(txq_id, trans_pcie->txqs.queue_used),  			  "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",  			  __func__, txq_id, idx, -			  trans->trans_cfg->base_params->max_tfd_queue_size, +			  trans->mac_cfg->base->max_tfd_queue_size,  			  txq->write_ptr, txq->read_ptr);  		return;  	} @@ -1171,15 +1172,15 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,  		fifo = cfg->fifo;  		/* Disable the scheduler prior configuring the cmd queue */ -		if (txq_id == trans_pcie->txqs.cmd.q_id && -		    trans_pcie->scd_set_active) +		if (txq_id == trans->conf.cmd_queue && +		    trans->conf.scd_set_active)  			iwl_scd_enable_set_active(trans, 0);  		/* Stop this Tx queue before configuring it */  		iwl_scd_txq_set_inactive(trans, txq_id);  		/* Set this queue as a chain-building queue unless it is CMD */ -		if (txq_id != trans_pcie->txqs.cmd.q_id) +		if (txq_id != trans->conf.cmd_queue)  			iwl_scd_txq_set_chain(trans, txq_id);  		if (cfg->aggregate) { @@ -1213,7 +1214,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,  		 * this sad hardware issue.  		 * This bug has been fixed on devices 9000 and up.  		 
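		 * As a worked example (values made up): write_ptr == 64 and
		 * ssn == 128 give (ssn - write_ptr) & 0x3f == 0 while
		 * ssn != write_ptr, so scd_bug is set below and the
		 * workaround is applied.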
*/ -		scd_bug = !trans->trans_cfg->mq_rx_supported && +		scd_bug = !trans->mac_cfg->mq_rx_supported &&  			!((ssn - txq->write_ptr) & 0x3f) &&  			(ssn != txq->write_ptr);  		if (scd_bug) @@ -1249,8 +1250,8 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,  			       SCD_QUEUE_STTS_REG_MSK);  		/* enable the scheduler for this queue (only) */ -		if (txq_id == trans_pcie->txqs.cmd.q_id && -		    trans_pcie->scd_set_active) +		if (txq_id == trans->conf.cmd_queue && +		    trans->conf.scd_set_active)  			iwl_scd_enable_set_active(trans, BIT(txq_id));  		IWL_DEBUG_TX_QUEUES(trans, @@ -1317,10 +1318,10 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	int i; -	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { +	for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) {  		struct iwl_txq *txq = trans_pcie->txqs.txq[i]; -		if (i == trans_pcie->txqs.cmd.q_id) +		if (i == trans->conf.cmd_queue)  			continue;  		/* we skip the command queue (obviously) so it's OK to nest */ @@ -1353,7 +1354,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,  			  struct iwl_host_cmd *cmd)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -	struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; +	struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];  	struct iwl_device_cmd *out_cmd;  	struct iwl_cmd_meta *out_meta;  	void *dup_buf = NULL; @@ -1368,7 +1369,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,  	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];  	unsigned long flags; -	if (WARN(!trans->wide_cmd_header && +	if (WARN(!trans->conf.wide_cmd_header &&  		 group_id > IWL_ALWAYS_LONG_GROUP,  		 "unsupported wide command %#x\n", cmd->id))  		return -EINVAL; @@ -1456,7 +1457,9 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,  		spin_unlock_irqrestore(&txq->lock, flags);  		IWL_ERR(trans, "No space in command queue\n"); -		iwl_op_mode_cmd_queue_full(trans->op_mode); +		iwl_op_mode_nic_error(trans->op_mode, +				      IWL_ERR_TYPE_CMD_QUEUE_FULL); +		iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_CMD_QUEUE_FULL);  		idx = -ENOSPC;  		goto free_dup_buf;  	} @@ -1480,7 +1483,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,  				    sizeof(struct iwl_cmd_header_wide));  		out_cmd->hdr_wide.reserved = 0;  		out_cmd->hdr_wide.sequence = -			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) | +			cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |  						 INDEX_TO_SEQ(txq->write_ptr));  		cmd_pos = sizeof(struct iwl_cmd_header_wide); @@ -1488,7 +1491,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,  	} else {  		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);  		out_cmd->hdr.sequence = -			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) | +			cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |  						 INDEX_TO_SEQ(txq->write_ptr));  		out_cmd->hdr.group_id = 0; @@ -1539,7 +1542,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,  		     iwl_get_cmd_string(trans, cmd->id),  		     group_id, out_cmd->hdr.cmd,  		     le16_to_cpu(out_cmd->hdr.sequence), -		     cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id); +		     cmd_size, txq->write_ptr, idx, trans->conf.cmd_queue);  	/* start the TFD with the minimum copy bytes */  	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); @@ -1638,18 +1641,16 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,  	struct iwl_device_cmd *cmd;  	struct 
iwl_cmd_meta *meta;  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -	struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; +	struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];  	/* If a Tx command is being handled and it isn't in the actual  	 * command queue then there a command routing bug has been introduced  	 * in the queue management code. */ -	if (WARN(txq_id != trans_pcie->txqs.cmd.q_id, -		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", -		 txq_id, trans_pcie->txqs.cmd.q_id, sequence, txq->read_ptr, -		 txq->write_ptr)) { -		iwl_print_hex_error(trans, pkt, 32); +	if (IWL_FW_CHECK(trans, txq_id != trans->conf.cmd_queue, +			 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d pkt=%*phN\n", +			 txq_id, trans->conf.cmd_queue, sequence, txq->read_ptr, +			 txq->write_ptr, 32, pkt))  		return; -	}  	spin_lock_bh(&txq->lock); @@ -1659,7 +1660,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,  	group_id = cmd->hdr.group_id;  	cmd_id = WIDE_ID(group_id, cmd->hdr.cmd); -	if (trans->trans_cfg->gen2) +	if (trans->mac_cfg->gen2)  		iwl_txq_gen2_tfd_unmap(trans, meta,  				       iwl_txq_get_tfd(trans, txq, index));  	else @@ -1692,7 +1693,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,  		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);  		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",  			       iwl_get_cmd_string(trans, cmd_id)); -		wake_up(&trans->wait_command_queue); +		wake_up(&trans_pcie->wait_command_queue);  	}  	meta->flags = 0; @@ -1762,7 +1763,7 @@ static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans,  	dma_addr_t phys;  	void *ret; -	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs); +	page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);  	if (WARN_ON(*page_ptr))  		return NULL; @@ -1864,6 +1865,7 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,   * @cmd_meta: command meta to store the scatter list information for unmapping   * @hdr: output argument for TSO headers   * @hdr_room: requested length for TSO headers + * @offset: offset into the data from which mapping should start   *   * Allocate space for a scatter gather list and TSO headers and map the SKB   * using the scatter gather list. 
The SKB is unmapped again when the page is @@ -1873,9 +1875,12 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,   */  struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,  				   struct iwl_cmd_meta *cmd_meta, -				   u8 **hdr, unsigned int hdr_room) +				   u8 **hdr, unsigned int hdr_room, +				   unsigned int offset)  {  	struct sg_table *sgt; +	unsigned int n_segments = skb_shinfo(skb)->nr_frags + 1; +	int orig_nents;  	if (WARN_ON_ONCE(skb_has_frag_list(skb)))  		return NULL; @@ -1883,8 +1888,7 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,  	*hdr = iwl_pcie_get_page_hdr(trans,  				     hdr_room + __alignof__(struct sg_table) +  				     sizeof(struct sg_table) + -				     (skb_shinfo(skb)->nr_frags + 1) * -				     sizeof(struct scatterlist), +				     n_segments * sizeof(struct scatterlist),  				     skb);  	if (!*hdr)  		return NULL; @@ -1892,14 +1896,15 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,  	sgt = (void *)PTR_ALIGN(*hdr + hdr_room, __alignof__(struct sg_table));  	sgt->sgl = (void *)(sgt + 1); -	sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1); +	sg_init_table(sgt->sgl, n_segments);  	/* Only map the data, not the header (it is copied to the TSO page) */ -	sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, skb_headlen(skb), -				       skb->data_len); -	if (WARN_ON_ONCE(sgt->orig_nents <= 0)) +	orig_nents = skb_to_sgvec(skb, sgt->sgl, offset, skb->len - offset); +	if (WARN_ON_ONCE(orig_nents <= 0))  		return NULL; +	sgt->orig_nents = orig_nents; +  	/* And map the entire SKB */  	if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0)  		return NULL; @@ -1917,7 +1922,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,  				   u16 tb1_len)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; +	struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;  	struct ieee80211_hdr *hdr = (void *)skb->data;  	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;  	unsigned int mss = skb_shinfo(skb)->gso_size; @@ -1948,7 +1953,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,  		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;  	/* Our device supports 9 segments at most, it will fit in 1 page */ -	sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room); +	sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room, +				snap_ip_tcp_hdrlen + hdr_len + iv_len);  	if (!sgt)  		return -ENOMEM; @@ -1968,7 +1974,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,  	 * have in the MPDU by themselves, but that we duplicate into  	 * all the different MSDUs inside the A-MSDU.  	 
*/ -	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); +	le16_add_cpu(&tx_cmd->params.len, -snap_ip_tcp_hdrlen);  	tso_start(skb, &tso); @@ -2011,7 +2017,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,  		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,  					hdr_tb_phys, hdr_tb_len);  		/* add this subframe's headers' length to the tx_cmd */ -		le16_add_cpu(&tx_cmd->len, pos_hdr - subf_hdrs_start); +		le16_add_cpu(&tx_cmd->params.len, pos_hdr - subf_hdrs_start);  		/* prepare the start_hdr for the next subframe */  		start_hdr = pos_hdr; @@ -2071,19 +2077,19 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,  					     int num_tbs)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -	struct iwlagn_scd_bc_tbl *scd_bc_tbl; +	struct iwl_bc_tbl_entry *scd_bc_tbl;  	int write_ptr = txq->write_ptr;  	int txq_id = txq->id;  	u8 sec_ctl = 0;  	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;  	__le16 bc_ent;  	struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd; -	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; -	u8 sta_id = tx_cmd->sta_id; +	struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload; +	u8 sta_id = tx_cmd->params.sta_id;  	scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr; -	sec_ctl = tx_cmd->sec_ctl; +	sec_ctl = tx_cmd->params.sec_ctl;  	switch (sec_ctl & TX_CMD_SEC_MSK) {  	case TX_CMD_SEC_CCM: @@ -2096,7 +2102,9 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,  		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;  		break;  	} -	if (trans_pcie->txqs.bc_table_dword) + +	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_7000 && +	    trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)  		len = DIV_ROUND_UP(len, 4);  	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) @@ -2104,10 +2112,10 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,  	bc_ent = cpu_to_le16(len | (sta_id << 12)); -	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; +	scd_bc_tbl[txq_id * TFD_QUEUE_BC_SIZE + write_ptr].tfd_offset = bc_ent;  	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) -		scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = +		scd_bc_tbl[txq_id * TFD_QUEUE_BC_SIZE + TFD_QUEUE_SIZE_MAX + write_ptr].tfd_offset =  			bc_ent;  } @@ -2116,7 +2124,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	struct ieee80211_hdr *hdr; -	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; +	struct iwl_tx_cmd_v6 *tx_cmd = (struct iwl_tx_cmd_v6 *)dev_cmd->payload;  	struct iwl_cmd_meta *out_meta;  	struct iwl_txq *txq;  	dma_addr_t tb0_phys, tb1_phys, scratch_phys; @@ -2157,7 +2165,8 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,  			struct iwl_device_tx_cmd **dev_cmd_ptr;  			dev_cmd_ptr = (void *)((u8 *)skb->cb + -					       trans_pcie->txqs.dev_cmd_offs); +					       trans->conf.cb_data_offs + +					       sizeof(void *));  			*dev_cmd_ptr = dev_cmd;  			__skb_queue_tail(&txq->overflow_q, skb); @@ -2188,10 +2197,10 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,  	tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);  	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) + -		       offsetof(struct iwl_tx_cmd, scratch); +		       offsetof(struct iwl_tx_cmd_v6_params, scratch); -	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); -	tx_cmd->dram_msb_ptr = 
iwl_get_dma_hi_addr(scratch_phys); +	tx_cmd->params.dram_lsb_ptr = cpu_to_le32(scratch_phys); +	tx_cmd->params.dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);  	/* Set up first empty entry in queue's array of Tx/cmd buffers */  	out_meta = &txq->entries[txq->write_ptr].meta; @@ -2203,7 +2212,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,  	 * (This calculation modifies the TX command, so do it before the  	 * setup of the first TB)  	 */ -	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) + +	len = sizeof(struct iwl_tx_cmd_v6) + sizeof(struct iwl_cmd_header) +  	      hdr_len - IWL_FIRST_TB_SIZE;  	/* do not align A-MSDU to dword as the subframe header aligns it */  	amsdu = ieee80211_is_data_qos(fc) && @@ -2213,7 +2222,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,  		tb1_len = ALIGN(len, 4);  		/* Tell NIC about any 2-byte padding after MAC header */  		if (tb1_len != len) -			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD); +			tx_cmd->params.tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);  	} else {  		tb1_len = len;  	} @@ -2226,9 +2235,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,  			       IWL_FIRST_TB_SIZE, true);  	/* there must be data left over for TB1 or this code must be changed */ -	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE); +	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_v6) < IWL_FIRST_TB_SIZE);  	BUILD_BUG_ON(sizeof(struct iwl_cmd_header) + -		     offsetofend(struct iwl_tx_cmd, scratch) > +		     offsetofend(struct iwl_tx_cmd_v6_params, scratch) >  		     IWL_FIRST_TB_SIZE);  	/* map the data for TB1 */ @@ -2274,7 +2283,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,  	tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);  	/* Set up entry for this TFD in Tx byte-count array */ -	iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), +	iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->params.len),  					 iwl_txq_gen1_tfd_get_num_tbs(tfd));  	wait_write_ptr = ieee80211_has_morefrags(fc); @@ -2316,24 +2325,24 @@ static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,  					    int read_ptr)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr; +	struct iwl_bc_tbl_entry *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;  	int txq_id = txq->id;  	u8 sta_id = 0;  	__le16 bc_ent;  	struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd; -	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; +	struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;  	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); -	if (txq_id != trans_pcie->txqs.cmd.q_id) -		sta_id = tx_cmd->sta_id; +	if (txq_id != trans->conf.cmd_queue) +		sta_id = tx_cmd->params.sta_id;  	bc_ent = cpu_to_le16(1 | (sta_id << 12)); -	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; +	scd_bc_tbl[txq_id * TFD_QUEUE_BC_SIZE + read_ptr].tfd_offset = bc_ent;  	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) -		scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = +		scd_bc_tbl[txq_id * TFD_QUEUE_BC_SIZE + TFD_QUEUE_SIZE_MAX + read_ptr].tfd_offset =  			bc_ent;  } @@ -2347,7 +2356,7 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,  	int txq_read_ptr, txq_write_ptr;  	/* This function is not meant to release cmd queue*/ -	if (WARN_ON(txq_id == trans_pcie->txqs.cmd.q_id)) +	if (WARN_ON(txq_id == trans->conf.cmd_queue))  		return;  	if (WARN_ON(!txq)) 
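The byte-count-table hunks above (iwl_txq_gen1_update_byte_cnt_tbl and iwl_txq_gen1_inval_byte_cnt_tbl, together with the BC_TABLE_SIZE define earlier in this file) replace the per-queue struct iwlagn_scd_bc_tbl with a single flat array of struct iwl_bc_tbl_entry, addressed as txq_id * TFD_QUEUE_BC_SIZE + index. A minimal sketch of the index equivalence, assuming TFD_QUEUE_BC_SIZE = TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP as defined in iwl-fh.h; the numeric values and the bc_entry()/bc_write() helpers below are illustrative, not part of the patch:

/* Sketch only: 256/64 mirror the iwl-fh.h values; treat them as
 * assumptions here. */
#define TFD_QUEUE_SIZE_MAX	256
#define TFD_QUEUE_SIZE_BC_DUP	64
#define TFD_QUEUE_BC_SIZE	(TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)

struct iwl_bc_tbl_entry {
	__le16 tfd_offset;
} __packed;

/* Old: scd_bc_tbl[txq_id].tfd_offset[idx] indexed into a fixed-size
 * per-queue struct.  New: the same cell in one flat DMA allocation,
 * found by computed offset. */
static inline __le16 *bc_entry(struct iwl_bc_tbl_entry *tbl,
			       int txq_id, int idx)
{
	return &tbl[txq_id * TFD_QUEUE_BC_SIZE + idx].tfd_offset;
}

/* Entries below TFD_QUEUE_SIZE_BC_DUP are mirrored past
 * TFD_QUEUE_SIZE_MAX so the scheduler can read across the
 * write-pointer wrap, the same dual write both hunks perform. */
static inline void bc_write(struct iwl_bc_tbl_entry *tbl,
			    int txq_id, int idx, __le16 bc_ent)
{
	*bc_entry(tbl, txq_id, idx) = bc_ent;
	if (idx < TFD_QUEUE_SIZE_BC_DUP)
		*bc_entry(tbl, txq_id, TFD_QUEUE_SIZE_MAX + idx) = bc_ent;
}

The flat layout keeps the allocation size (BC_TABLE_SIZE per queue) and the indexing math in one place instead of baking the table geometry into a struct type.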
@@ -2362,6 +2371,10 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,  	txq_write_ptr = txq->write_ptr;  	spin_unlock(&txq->lock); +	/* There is nothing to do if we are flushing an empty queue */ +	if (is_flush && txq_write_ptr == txq_read_ptr) +		goto out; +  	read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr);  	if (!test_bit(txq_id, trans_pcie->txqs.queue_used)) { @@ -2385,7 +2398,7 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,  		IWL_ERR(trans,  			"%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",  			__func__, txq_id, last_to_free, -			trans->trans_cfg->base_params->max_tfd_queue_size, +			trans->mac_cfg->base->max_tfd_queue_size,  			txq_write_ptr, txq_read_ptr);  		iwl_op_mode_time_point(trans->op_mode, @@ -2414,7 +2427,7 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,  		txq->entries[read_ptr].skb = NULL; -		if (!trans->trans_cfg->gen2) +		if (!trans->mac_cfg->gen2)  			iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq,  							txq_read_ptr); @@ -2456,7 +2469,8 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,  			struct iwl_device_tx_cmd *dev_cmd_ptr;  			dev_cmd_ptr = *(void **)((u8 *)skb->cb + -						 trans_pcie->txqs.dev_cmd_offs); +						 trans->conf.cb_data_offs + +						 sizeof(void *));  			/*  			 * Note that we can very well be overflowing again. @@ -2529,7 +2543,7 @@ void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans,  			/* remember how long until the timer fires */  			txq->frozen_expiry_remainder =  				txq->stuck_timer.expires - now; -			del_timer(&txq->stuck_timer); +			timer_delete(&txq->stuck_timer);  			goto next_queue;  		} @@ -2548,11 +2562,11 @@ next_queue:  #define HOST_COMPLETE_TIMEOUT	(2 * HZ)  static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans, -					 struct iwl_host_cmd *cmd) +					 struct iwl_host_cmd *cmd, +					 const char *cmd_str)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -	const char *cmd_str = iwl_get_cmd_string(trans, cmd->id); -	struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; +	struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];  	int cmd_idx;  	int ret; @@ -2565,7 +2579,7 @@ static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,  	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str); -	if (trans->trans_cfg->gen2) +	if (trans->mac_cfg->gen2)  		cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);  	else  		cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd); @@ -2578,7 +2592,7 @@ static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,  		return ret;  	} -	ret = wait_event_timeout(trans->wait_command_queue, +	ret = wait_event_timeout(trans_pcie->wait_command_queue,  				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,  					   &trans->status),  				 HOST_COMPLETE_TIMEOUT); @@ -2594,7 +2608,7 @@ static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,  			       cmd_str);  		ret = -ETIMEDOUT; -		iwl_trans_sync_nmi(trans); +		iwl_trans_pcie_sync_nmi(trans);  		goto cancel;  	} @@ -2645,6 +2659,8 @@ cancel:  int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,  			     struct iwl_host_cmd *cmd)  { +	const char *cmd_str = iwl_get_cmd_string(trans, cmd->id); +  	/* Make sure the NIC is still alive in the bus */  	if (test_bit(STATUS_TRANS_DEAD, &trans->status))  		return -ENODEV; @@ -2656,20 +2672,16 @@ int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,  		return -ERFKILL;  	} -	if (unlikely(trans->system_pm_mode == 
IWL_PLAT_PM_MODE_D3 && -		     !(cmd->flags & CMD_SEND_IN_D3))) { -		IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id); -		return -EHOSTDOWN; -	} -  	if (cmd->flags & CMD_ASYNC) {  		int ret; +		IWL_DEBUG_INFO(trans, "Sending async command %s\n", cmd_str); +  		/* An asynchronous command can not expect an SKB to be set. */  		if (WARN_ON(cmd->flags & CMD_WANT_SKB))  			return -EINVAL; -		if (trans->trans_cfg->gen2) +		if (trans->mac_cfg->gen2)  			ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);  		else  			ret = iwl_pcie_enqueue_hcmd(trans, cmd); @@ -2683,6 +2695,5 @@ int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,  		return 0;  	} -	return iwl_trans_pcie_send_hcmd_sync(trans, cmd); +	return iwl_trans_pcie_send_hcmd_sync(trans, cmd, cmd_str);  } -IWL_EXPORT_SYMBOL(iwl_trans_pcie_send_hcmd); diff --git a/sys/contrib/dev/iwlwifi/pcie/iwl-context-info-v2.h b/sys/contrib/dev/iwlwifi/pcie/iwl-context-info-v2.h new file mode 100644 index 000000000000..416baadc5017 --- /dev/null +++ b/sys/contrib/dev/iwlwifi/pcie/iwl-context-info-v2.h @@ -0,0 +1,344 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2018, 2020-2025 Intel Corporation + */ +#ifndef __iwl_context_info_file_v2_h__ +#define __iwl_context_info_file_v2_h__ + +#include "iwl-context-info.h" + +#define CSR_CTXT_INFO_BOOT_CTRL         0x0 +#define CSR_CTXT_INFO_ADDR              0x118 +#define CSR_IML_DATA_ADDR               0x120 +#define CSR_IML_SIZE_ADDR               0x128 +#define CSR_IML_RESP_ADDR               0x12c + +#define UNFRAGMENTED_PNVM_PAYLOADS_NUMBER 2 + +/* Set bit for enabling automatic function boot */ +#define CSR_AUTO_FUNC_BOOT_ENA          BIT(1) +/* Set bit for initiating function boot */ +#define CSR_AUTO_FUNC_INIT              BIT(7) + +/** + * enum iwl_prph_scratch_mtr_format - tfd size configuration + * @IWL_PRPH_MTR_FORMAT_16B: 16 bit tfd + * @IWL_PRPH_MTR_FORMAT_32B: 32 bit tfd + * @IWL_PRPH_MTR_FORMAT_64B: 64 bit tfd + * @IWL_PRPH_MTR_FORMAT_256B: 256 bit tfd + */ +enum iwl_prph_scratch_mtr_format { +	IWL_PRPH_MTR_FORMAT_16B = 0x0, +	IWL_PRPH_MTR_FORMAT_32B = 0x40000, +	IWL_PRPH_MTR_FORMAT_64B = 0x80000, +	IWL_PRPH_MTR_FORMAT_256B = 0xC0000, +}; + +/** + * enum iwl_prph_scratch_flags - PRPH scratch control flags + * @IWL_PRPH_SCRATCH_IMR_DEBUG_EN: IMR support for debug + * @IWL_PRPH_SCRATCH_EARLY_DEBUG_EN: enable early debug conf + * @IWL_PRPH_SCRATCH_EDBG_DEST_DRAM: use DRAM, with size allocated + *	in hwm config. + * @IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL: use buffer on SRAM + * @IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER: use st arbiter, mainly for + *	multicomm. + * @IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF: route debug data to SoC HW + * @IWL_PRPH_SCRATCH_RB_SIZE_4K: Use 4K RB size (the default is 2K) + * @IWL_PRPH_SCRATCH_MTR_MODE: format used for completion - 0: for + *	completion descriptor, 1 for responses (legacy) + * @IWL_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the tfd. + *	There are 4 optional values: 0: 16 bit, 1: 32 bit, 2: 64 bit, + *	3: 256 bit. + * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_MASK: RB size full information, ignored + *	by older firmware versions, so set IWL_PRPH_SCRATCH_RB_SIZE_4K + *	appropriately; use the below values for this. + * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K: 8kB RB size + * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K: 12kB RB size + * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K: 16kB RB size + * @IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE: Indicate fw to set SCU_FORCE_ACTIVE + *	upon reset. 
+ * @IWL_PRPH_SCRATCH_TOP_RESET: request TOP reset + */ +enum iwl_prph_scratch_flags { +	IWL_PRPH_SCRATCH_IMR_DEBUG_EN		= BIT(1), +	IWL_PRPH_SCRATCH_EARLY_DEBUG_EN		= BIT(4), +	IWL_PRPH_SCRATCH_EDBG_DEST_DRAM		= BIT(8), +	IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL	= BIT(9), +	IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER	= BIT(10), +	IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF	= BIT(11), +	IWL_PRPH_SCRATCH_RB_SIZE_4K		= BIT(16), +	IWL_PRPH_SCRATCH_MTR_MODE		= BIT(17), +	IWL_PRPH_SCRATCH_MTR_FORMAT		= BIT(18) | BIT(19), +	IWL_PRPH_SCRATCH_RB_SIZE_EXT_MASK	= 0xf << 20, +	IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K		= 8 << 20, +	IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K	= 9 << 20, +	IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K	= 10 << 20, +	IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE	= BIT(29), +	IWL_PRPH_SCRATCH_TOP_RESET		= BIT(30), +}; + +/** + * enum iwl_prph_scratch_ext_flags - PRPH scratch control ext flags + * @IWL_PRPH_SCRATCH_EXT_EXT_FSEQ: external FSEQ image provided + * @IWL_PRPH_SCRATCH_EXT_URM_FW: switch to URM mode based on fw setting + * @IWL_PRPH_SCRATCH_EXT_URM_PERM: switch to permanent URM mode + * @IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID: use external 32 KHz clock + */ +enum iwl_prph_scratch_ext_flags { +	IWL_PRPH_SCRATCH_EXT_EXT_FSEQ		= BIT(0), +	IWL_PRPH_SCRATCH_EXT_URM_FW		= BIT(4), +	IWL_PRPH_SCRATCH_EXT_URM_PERM		= BIT(5), +	IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID	= BIT(8), +}; + +/** + * struct iwl_prph_scratch_version - version structure + * @mac_id: SKU and revision id + * @version: prph scratch information version id + * @size: the size of the context information in DWs + * @reserved: reserved + */ +struct iwl_prph_scratch_version { +	__le16 mac_id; +	__le16 version; +	__le16 size; +	__le16 reserved; +} __packed; /* PERIPH_SCRATCH_VERSION_S */ + +/** + * struct iwl_prph_scratch_control - control structure + * @control_flags: context information flags see &enum iwl_prph_scratch_flags + * @control_flags_ext: context information for extended flags, + *	see &enum iwl_prph_scratch_ext_flags + */ +struct iwl_prph_scratch_control { +	__le32 control_flags; +	__le32 control_flags_ext; +} __packed; /* PERIPH_SCRATCH_CONTROL_S */ + +/** + * struct iwl_prph_scratch_pnvm_cfg - PNVM scratch + * @pnvm_base_addr: PNVM start address + * @pnvm_size: the size of the PNVM image in bytes + * @reserved: reserved + */ +struct iwl_prph_scratch_pnvm_cfg { +	__le64 pnvm_base_addr; +	__le32 pnvm_size; +	__le32 reserved; +} __packed; /* PERIPH_SCRATCH_PNVM_CFG_S */ + +/** + * struct iwl_prph_scratch_mem_desc_addr_array - DRAM + * @mem_descs: array of dram addresses. + * Each address is the beginning of a PNVM payload. 
+ */ +struct iwl_prph_scratch_mem_desc_addr_array { +	__le64 mem_descs[IPC_DRAM_MAP_ENTRY_NUM_MAX]; +} __packed; /* PERIPH_SCRATCH_MEM_DESC_ADDR_ARRAY_S_VER_1 */ + +/** + * struct iwl_prph_scratch_hwm_cfg - hwm config + * @hwm_base_addr: hwm start address + * @hwm_size: hwm size in DWs + * @debug_token_config: debug preset + */ +struct iwl_prph_scratch_hwm_cfg { +	__le64 hwm_base_addr; +	__le32 hwm_size; +	__le32 debug_token_config; +} __packed; /* PERIPH_SCRATCH_HWM_CFG_S */ + +/** + * struct iwl_prph_scratch_rbd_cfg - RBDs configuration + * @free_rbd_addr: default queue free RB CB base address + * @reserved: reserved + */ +struct iwl_prph_scratch_rbd_cfg { +	__le64 free_rbd_addr; +	__le32 reserved; +} __packed; /* PERIPH_SCRATCH_RBD_CFG_S */ + +/** + * struct iwl_prph_scratch_uefi_cfg - prph scratch reduce power table + * @base_addr: reduce power table address + * @size: the size of the entire power table image + * @reserved: (reserved) + */ +struct iwl_prph_scratch_uefi_cfg { +	__le64 base_addr; +	__le32 size; +	__le32 reserved; +} __packed; /* PERIPH_SCRATCH_UEFI_CFG_S */ + +/** + * struct iwl_prph_scratch_step_cfg - prph scratch step configuration + * @mbx_addr_0: [0:7] revision, + *		[8:15] cnvi_to_cnvr length, + *		[16:23] cnvr_to_cnvi channel length, + *		[24:31] radio1 reserved + * @mbx_addr_1: [0:7] radio2 reserved + */ + +struct iwl_prph_scratch_step_cfg { +	__le32 mbx_addr_0; +	__le32 mbx_addr_1; +} __packed; + +/** + * struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config + * @version: version information of context info and HW + * @control: control flags of FH configurations + * @pnvm_cfg: ror configuration + * @hwm_cfg: hwm configuration + * @rbd_cfg: default RX queue configuration + * @reduce_power_cfg: UEFI power reduction table + * @step_cfg: step configuration + */ +struct iwl_prph_scratch_ctrl_cfg { +	struct iwl_prph_scratch_version version; +	struct iwl_prph_scratch_control control; +	struct iwl_prph_scratch_pnvm_cfg pnvm_cfg; +	struct iwl_prph_scratch_hwm_cfg hwm_cfg; +	struct iwl_prph_scratch_rbd_cfg rbd_cfg; +	struct iwl_prph_scratch_uefi_cfg reduce_power_cfg; +	struct iwl_prph_scratch_step_cfg step_cfg; +} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */ + +#define IWL_NUM_DRAM_FSEQ_ENTRIES	8 + +/** + * struct iwl_context_info_dram_fseq - images DRAM map (with fseq) + * each entry in the map represents a DRAM chunk of up to 32 KB + * @common: UMAC/LMAC/virtual images + * @fseq_img: FSEQ image DRAM map + */ +struct iwl_context_info_dram_fseq { +	struct iwl_context_info_dram_nonfseq common; +	__le64 fseq_img[IWL_NUM_DRAM_FSEQ_ENTRIES]; +} __packed; /* PERIPH_SCRATCH_DRAM_MAP_S */ + +/** + * struct iwl_prph_scratch - peripheral scratch mapping + * @ctrl_cfg: control and configuration of prph scratch + * @dram: firmware images addresses in DRAM + * @fseq_override: FSEQ override parameters + * @step_analog_params: STEP analog calibration values + * @reserved: reserved + */ +struct iwl_prph_scratch { +	struct iwl_prph_scratch_ctrl_cfg ctrl_cfg; +	__le32 fseq_override; +	__le32 step_analog_params; +	__le32 reserved[8]; +	struct iwl_context_info_dram_fseq dram; +} __packed; /* PERIPH_SCRATCH_S */ + +/** + * struct iwl_prph_info - peripheral information + * @boot_stage_mirror: reflects the value in the Boot Stage CSR register + * @ipc_status_mirror: reflects the value in the IPC Status CSR register + * @sleep_notif: indicates the peripheral sleep status + * @reserved: reserved + */ +struct iwl_prph_info { +	__le32 boot_stage_mirror; +	__le32 ipc_status_mirror; +	__le32 
sleep_notif; +	__le32 reserved; +} __packed; /* PERIPH_INFO_S */ + +/** + * struct iwl_context_info_v2 - device INIT configuration + * @version: version of the context information + * @size: size of context information in DWs + * @config: context in which the peripheral would execute - a subset of + *	capability csr register published by the peripheral + * @prph_info_base_addr: the peripheral information structure start address + * @cr_head_idx_arr_base_addr: the completion ring head index array + *	start address + * @tr_tail_idx_arr_base_addr: the transfer ring tail index array + *	start address + * @cr_tail_idx_arr_base_addr: the completion ring tail index array + *	start address + * @tr_head_idx_arr_base_addr: the transfer ring head index array + *	start address + * @cr_idx_arr_size: number of entries in the completion ring index array + * @tr_idx_arr_size: number of entries in the transfer ring index array + * @mtr_base_addr: the message transfer ring start address + * @mcr_base_addr: the message completion ring start address + * @mtr_size: number of entries which the message transfer ring can hold + * @mcr_size: number of entries which the message completion ring can hold + * @mtr_doorbell_vec: the doorbell vector associated with the message + *	transfer ring + * @mcr_doorbell_vec: the doorbell vector associated with the message + *	completion ring + * @mtr_msi_vec: the MSI which shall be generated by the peripheral after + *	completing a transfer descriptor in the message transfer ring + * @mcr_msi_vec: the MSI which shall be generated by the peripheral after + *	completing a completion descriptor in the message completion ring + * @mtr_opt_header_size: the size of the optional header in the transfer + *	descriptor associated with the message transfer ring in DWs + * @mtr_opt_footer_size: the size of the optional footer in the transfer + *	descriptor associated with the message transfer ring in DWs + * @mcr_opt_header_size: the size of the optional header in the completion + *	descriptor associated with the message completion ring in DWs + * @mcr_opt_footer_size: the size of the optional footer in the completion + *	descriptor associated with the message completion ring in DWs + * @msg_rings_ctrl_flags: message rings control flags + * @prph_info_msi_vec: the MSI which shall be generated by the peripheral + *	after updating the Peripheral Information structure + * @prph_scratch_base_addr: the peripheral scratch structure start address + * @prph_scratch_size: the size of the peripheral scratch structure in DWs + * @reserved: reserved + */ +struct iwl_context_info_v2 { +	__le16 version; +	__le16 size; +	__le32 config; +	__le64 prph_info_base_addr; +	__le64 cr_head_idx_arr_base_addr; +	__le64 tr_tail_idx_arr_base_addr; +	__le64 cr_tail_idx_arr_base_addr; +	__le64 tr_head_idx_arr_base_addr; +	__le16 cr_idx_arr_size; +	__le16 tr_idx_arr_size; +	__le64 mtr_base_addr; +	__le64 mcr_base_addr; +	__le16 mtr_size; +	__le16 mcr_size; +	__le16 mtr_doorbell_vec; +	__le16 mcr_doorbell_vec; +	__le16 mtr_msi_vec; +	__le16 mcr_msi_vec; +	u8 mtr_opt_header_size; +	u8 mtr_opt_footer_size; +	u8 mcr_opt_header_size; +	u8 mcr_opt_footer_size; +	__le16 msg_rings_ctrl_flags; +	__le16 prph_info_msi_vec; +	__le64 prph_scratch_base_addr; +	__le32 prph_scratch_size; +	__le32 reserved; +} __packed; /* IPC_CONTEXT_INFO_S */ + +int iwl_pcie_ctxt_info_v2_alloc(struct iwl_trans *trans, +				const struct iwl_fw *fw, +				const struct fw_img *img); +void iwl_pcie_ctxt_info_v2_kick(struct iwl_trans *trans); +void 
iwl_pcie_ctxt_info_v2_free(struct iwl_trans *trans, bool alive); + +int iwl_trans_pcie_ctx_info_v2_load_pnvm(struct iwl_trans *trans, +					 const struct iwl_pnvm_image *pnvm_payloads, +					 const struct iwl_ucode_capabilities *capa); +void iwl_trans_pcie_ctx_info_v2_set_pnvm(struct iwl_trans *trans, +					 const struct iwl_ucode_capabilities *capa); +int +iwl_trans_pcie_ctx_info_v2_load_reduce_power(struct iwl_trans *trans, +					     const struct iwl_pnvm_image *payloads, +					     const struct iwl_ucode_capabilities *capa); +void +iwl_trans_pcie_ctx_info_v2_set_reduce_power(struct iwl_trans *trans, +					    const struct iwl_ucode_capabilities *capa); +#endif /* __iwl_context_info_file_v2_h__ */ diff --git a/sys/contrib/dev/iwlwifi/pcie/iwl-context-info.h b/sys/contrib/dev/iwlwifi/pcie/iwl-context-info.h new file mode 100644 index 000000000000..7ae0fbdef208 --- /dev/null +++ b/sys/contrib/dev/iwlwifi/pcie/iwl-context-info.h @@ -0,0 +1,197 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2017 Intel Deutschland GmbH + * Copyright (C) 2018-2020, 2022, 2024-2025 Intel Corporation + */ +#ifndef __iwl_context_info_file_h__ +#define __iwl_context_info_file_h__ + +/* maximum number of DRAM map entries supported by FW */ +#define IWL_MAX_DRAM_ENTRY	64 +#define CSR_CTXT_INFO_BA	0x40 + +/** + * enum iwl_context_info_flags - Context information control flags + * @IWL_CTXT_INFO_AUTO_FUNC_INIT: If set, FW will not wait before interrupting + *	the init done for driver command that configures several system modes + * @IWL_CTXT_INFO_EARLY_DEBUG: enable early debug + * @IWL_CTXT_INFO_ENABLE_CDMP: enable core dump + * @IWL_CTXT_INFO_RB_CB_SIZE: mask of the RBD Cyclic Buffer Size + *	exponent, the actual size is 2**value, valid sizes are 8-2048. + *	The value is four bits long. 
Maximum valid exponent is 12 + * @IWL_CTXT_INFO_TFD_FORMAT_LONG: use long TFD Format (the + *	default is short format - not supported by the driver) + * @IWL_CTXT_INFO_RB_SIZE: RB size mask + *	(values are IWL_CTXT_INFO_RB_SIZE_*K) + * @IWL_CTXT_INFO_RB_SIZE_1K: Value for 1K RB size + * @IWL_CTXT_INFO_RB_SIZE_2K: Value for 2K RB size + * @IWL_CTXT_INFO_RB_SIZE_4K: Value for 4K RB size + * @IWL_CTXT_INFO_RB_SIZE_8K: Value for 8K RB size + * @IWL_CTXT_INFO_RB_SIZE_12K: Value for 12K RB size + * @IWL_CTXT_INFO_RB_SIZE_16K: Value for 16K RB size + * @IWL_CTXT_INFO_RB_SIZE_20K: Value for 20K RB size + * @IWL_CTXT_INFO_RB_SIZE_24K: Value for 24K RB size + * @IWL_CTXT_INFO_RB_SIZE_28K: Value for 28K RB size + * @IWL_CTXT_INFO_RB_SIZE_32K: Value for 32K RB size + */ +enum iwl_context_info_flags { +	IWL_CTXT_INFO_AUTO_FUNC_INIT	= 0x0001, +	IWL_CTXT_INFO_EARLY_DEBUG	= 0x0002, +	IWL_CTXT_INFO_ENABLE_CDMP	= 0x0004, +	IWL_CTXT_INFO_RB_CB_SIZE	= 0x00f0, +	IWL_CTXT_INFO_TFD_FORMAT_LONG	= 0x0100, +	IWL_CTXT_INFO_RB_SIZE		= 0x1e00, +	IWL_CTXT_INFO_RB_SIZE_1K	= 0x1, +	IWL_CTXT_INFO_RB_SIZE_2K	= 0x2, +	IWL_CTXT_INFO_RB_SIZE_4K	= 0x4, +	IWL_CTXT_INFO_RB_SIZE_8K	= 0x8, +	IWL_CTXT_INFO_RB_SIZE_12K	= 0x9, +	IWL_CTXT_INFO_RB_SIZE_16K	= 0xa, +	IWL_CTXT_INFO_RB_SIZE_20K	= 0xb, +	IWL_CTXT_INFO_RB_SIZE_24K	= 0xc, +	IWL_CTXT_INFO_RB_SIZE_28K	= 0xd, +	IWL_CTXT_INFO_RB_SIZE_32K	= 0xe, +}; + +/** + * struct iwl_context_info_version - version structure + * @mac_id: SKU and revision id + * @version: context information version id + * @size: the size of the context information in DWs + * @reserved: (reserved) + */ +struct iwl_context_info_version { +	__le16 mac_id; +	__le16 version; +	__le16 size; +	__le16 reserved; +} __packed; + +/** + * struct iwl_context_info_control - version structure + * @control_flags: context information flags see &enum iwl_context_info_flags + * @reserved: (reserved) + */ +struct iwl_context_info_control { +	__le32 control_flags; +	__le32 reserved; +} __packed; + +/** + * struct iwl_context_info_dram_nonfseq - images DRAM map + * each entry in the map represents a DRAM chunk of up to 32 KB + * @umac_img: UMAC image DRAM map + * @lmac_img: LMAC image DRAM map + * @virtual_img: paged image DRAM map + */ +struct iwl_context_info_dram_nonfseq { +	__le64 umac_img[IWL_MAX_DRAM_ENTRY]; +	__le64 lmac_img[IWL_MAX_DRAM_ENTRY]; +	__le64 virtual_img[IWL_MAX_DRAM_ENTRY]; +} __packed; + +/** + * struct iwl_context_info_rbd_cfg - RBDs configuration + * @free_rbd_addr: default queue free RB CB base address + * @used_rbd_addr: default queue used RB CB base address + * @status_wr_ptr: default queue used RB status write pointer + */ +struct iwl_context_info_rbd_cfg { +	__le64 free_rbd_addr; +	__le64 used_rbd_addr; +	__le64 status_wr_ptr; +} __packed; + +/** + * struct iwl_context_info_hcmd_cfg  - command queue configuration + * @cmd_queue_addr: address of command queue + * @cmd_queue_size: number of entries + * @reserved: (reserved) + */ +struct iwl_context_info_hcmd_cfg { +	__le64 cmd_queue_addr; +	u8 cmd_queue_size; +	u8 reserved[7]; +} __packed; + +/** + * struct iwl_context_info_dump_cfg - Core Dump configuration + * @core_dump_addr: core dump (debug DRAM address) start address + * @core_dump_size: size, in DWs + * @reserved: (reserved) + */ +struct iwl_context_info_dump_cfg { +	__le64 core_dump_addr; +	__le32 core_dump_size; +	__le32 reserved; +} __packed; + +/** + * struct iwl_context_info_pnvm_cfg - platform NVM data configuration + * @platform_nvm_addr: Platform NVM data start address + * @platform_nvm_size: 
size in DWs + * @reserved: (reserved) + */ +struct iwl_context_info_pnvm_cfg { +	__le64 platform_nvm_addr; +	__le32 platform_nvm_size; +	__le32 reserved; +} __packed; + +/** + * struct iwl_context_info_early_dbg_cfg - early debug configuration for + *	dumping DRAM addresses + * @early_debug_addr: early debug start address + * @early_debug_size: size in DWs + * @reserved: (reserved) + */ +struct iwl_context_info_early_dbg_cfg { +	__le64 early_debug_addr; +	__le32 early_debug_size; +	__le32 reserved; +} __packed; + +/** + * struct iwl_context_info - device INIT configuration + * @version: version information of context info and HW + * @control: control flags of FH configurations + * @reserved0: (reserved) + * @rbd_cfg: default RX queue configuration + * @hcmd_cfg: command queue configuration + * @reserved1: (reserved) + * @dump_cfg: core dump data + * @edbg_cfg: early debug configuration + * @pnvm_cfg: platform nvm configuration + * @reserved2: (reserved) + * @dram: firmware image addresses in DRAM + * @reserved3: (reserved) + */ +struct iwl_context_info { +	struct iwl_context_info_version version; +	struct iwl_context_info_control control; +	__le64 reserved0; +	struct iwl_context_info_rbd_cfg rbd_cfg; +	struct iwl_context_info_hcmd_cfg hcmd_cfg; +	__le32 reserved1[4]; +	struct iwl_context_info_dump_cfg dump_cfg; +	struct iwl_context_info_early_dbg_cfg edbg_cfg; +	struct iwl_context_info_pnvm_cfg pnvm_cfg; +	__le32 reserved2[16]; +	struct iwl_context_info_dram_nonfseq dram; +	__le32 reserved3[16]; +} __packed; /* BOOT_LOADER_CONTEXT_INFO_S */ + +int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *img); +void iwl_pcie_ctxt_info_free(struct iwl_trans *trans); +void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans); +int iwl_pcie_init_fw_sec(struct iwl_trans *trans, +			 const struct fw_img *fw, +			 struct iwl_context_info_dram_nonfseq *ctxt_dram); +void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans, +					    size_t size, +					    dma_addr_t *phys); +int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans, +				 const void *data, u32 len, +				 struct iwl_dram_data *dram); + +#endif /* __iwl_context_info_file_h__ */ diff --git a/sys/contrib/dev/iwlwifi/pcie/utils.c b/sys/contrib/dev/iwlwifi/pcie/utils.c new file mode 100644 index 000000000000..d777e1517cc5 --- /dev/null +++ b/sys/contrib/dev/iwlwifi/pcie/utils.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2025 Intel Corporation + */ + +#include <linux/pci.h> +#include <linux/gfp.h> + +#include "iwl-io.h" +#include "pcie/utils.h" + +void iwl_trans_pcie_dump_regs(struct iwl_trans *trans, struct pci_dev *pdev) +{ +#define PCI_DUMP_SIZE		352 +#define PCI_MEM_DUMP_SIZE	64 +#define PCI_PARENT_DUMP_SIZE	524 +#define PREFIX_LEN		32 + +	static bool pcie_dbg_dumped_once = 0; +	u32 i, pos, alloc_size, *ptr, *buf; +	char *prefix; + +	if (pcie_dbg_dumped_once) +		return; + +	/* Should be a multiple of 4 */ +	BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3); +	BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3); +	BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3); + +	/* Alloc a max size buffer */ +	alloc_size = PCI_ERR_ROOT_ERR_SRC +  4 + PREFIX_LEN; +	alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN); +	alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN); +	alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN); + +	buf = kmalloc(alloc_size, GFP_ATOMIC); +	if (!buf) +		
return; +	prefix = (char *)buf + alloc_size - PREFIX_LEN; + +	IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n"); + +	/* Print wifi device registers */ +	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev)); +	IWL_ERR(trans, "iwlwifi device config registers:\n"); +	for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++) +		if (pci_read_config_dword(pdev, i, ptr)) +			goto err_read; +#if defined(__linux__) +	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); +#elif defined(__FreeBSD__) +	iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i); +#endif + +	IWL_ERR(trans, "iwlwifi device memory mapped registers:\n"); +	for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++) +		*ptr = iwl_read32(trans, i); +#if defined(__linux__) +	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); +#elif defined(__FreeBSD__) +	iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i); +#endif + +	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); +	if (pos) { +		IWL_ERR(trans, "iwlwifi device AER capability structure:\n"); +		for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++) +			if (pci_read_config_dword(pdev, pos + i, ptr)) +				goto err_read; +#if defined(__linux__) +		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, +			       32, 4, buf, i, 0); +#elif defined(__FreeBSD__) +		iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i); +#endif +	} + +	/* Print parent device registers next */ +	if (!pdev->bus->self) +		goto out; + +	pdev = pdev->bus->self; +	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev)); + +	IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n", +		pci_name(pdev)); +	for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++) +		if (pci_read_config_dword(pdev, i, ptr)) +			goto err_read; +#if defined(__linux__) +	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); +#elif defined(__FreeBSD__) +	iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i); +#endif + +	/* Print root port AER registers */ +	pos = 0; +	pdev = pcie_find_root_port(pdev); +	if (pdev) +		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); +	if (pos) { +		IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n", +			pci_name(pdev)); +		sprintf(prefix, "iwlwifi %s: ", pci_name(pdev)); +		for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++) +			if (pci_read_config_dword(pdev, pos + i, ptr)) +				goto err_read; +#if defined(__linux__) +		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, +			       4, buf, i, 0); +#elif defined(__FreeBSD__) +		iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i); +#endif +	} +	goto out; + +err_read: +#if defined(__linux__) +	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); +#elif defined(__FreeBSD__) +	iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i); +#endif +	IWL_ERR(trans, "Read failed at 0x%X\n", i); +out: +	pcie_dbg_dumped_once = 1; +	kfree(buf); +} diff --git a/sys/contrib/dev/iwlwifi/pcie/utils.h b/sys/contrib/dev/iwlwifi/pcie/utils.h new file mode 100644 index 000000000000..27437d5e099b --- /dev/null +++ b/sys/contrib/dev/iwlwifi/pcie/utils.h @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2025 Intel Corporation + */ + +#ifndef __iwl_pcie_utils_h__ +#define __iwl_pcie_utils_h__ + +#include "iwl-io.h" + +void iwl_trans_pcie_dump_regs(struct iwl_trans *trans, struct pci_dev *pdev); + +static inline void _iwl_trans_set_bits_mask(struct iwl_trans *trans, +					
    u32 reg, u32 mask, u32 value) +{ +	u32 v; + +#ifdef CONFIG_IWLWIFI_DEBUG +	WARN_ON_ONCE(value & ~mask); +#endif + +	v = iwl_read32(trans, reg); +	v &= ~mask; +	v |= value; +	iwl_write32(trans, reg, v); +} + +static inline void iwl_trans_clear_bit(struct iwl_trans *trans, +				       u32 reg, u32 mask) +{ +	_iwl_trans_set_bits_mask(trans, reg, mask, 0); +} + +static inline void iwl_trans_set_bit(struct iwl_trans *trans, +				     u32 reg, u32 mask) +{ +	_iwl_trans_set_bits_mask(trans, reg, mask, mask); +} + +#endif /* __iwl_pcie_utils_h__ */
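The _iwl_trans_set_bits_mask() helper in the new utils.h is a plain read-modify-write: v = (iwl_read32(reg) & ~mask) | value, with a debug-only warning if value escapes mask. A short usage sketch; CSR_GP_CNTRL and CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ are taken from iwl-csr.h, and whether any caller performs this exact sequence is an assumption:

/* Illustrative only: set, then clear, a single CSR bit through the
 * new helpers.  Because value must be a subset of mask, passing the
 * mask itself (set) or 0 (clear) is always safe. */
static void example_request_mac_access(struct iwl_trans *trans)
{
	iwl_trans_set_bit(trans, CSR_GP_CNTRL,
			  CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* ... access registers that need the MAC clock here ... */

	iwl_trans_clear_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}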

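For completeness, the two words of struct iwl_prph_scratch_control defined in iwl-context-info-v2.h are plain little-endian ORs of the iwl_prph_scratch_flags and iwl_prph_scratch_ext_flags enums. A hedged sketch of composing them; fill_control() and the particular flag combination are illustrative only, not lifted from the driver:

/* One plausible combination: completion-descriptor mode, 256-bit
 * TFDs, 4K RBs, plus optional URM and external 32 kHz clock flags. */
static void fill_control(struct iwl_prph_scratch_control *ctrl,
			 bool urm_fw, bool ext_32khz)
{
	u32 flags = IWL_PRPH_SCRATCH_MTR_MODE |
		    (IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT) |
		    IWL_PRPH_SCRATCH_RB_SIZE_4K;
	u32 ext = 0;

	if (urm_fw)
		ext |= IWL_PRPH_SCRATCH_EXT_URM_FW;
	if (ext_32khz)
		ext |= IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID;

	ctrl->control_flags = cpu_to_le32(flags);
	ctrl->control_flags_ext = cpu_to_le32(ext);
}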