aboutsummaryrefslogtreecommitdiff
path: root/sys/dev
diff options
context:
space:
mode:
Diffstat (limited to 'sys/dev')
-rw-r--r--sys/dev/acpi_support/acpi_system76.c21
-rw-r--r--sys/dev/acpica/acpi.c90
-rw-r--r--sys/dev/acpica/acpi_spmc.c76
-rw-r--r--sys/dev/acpica/acpivar.h2
-rw-r--r--sys/dev/amdsmu/amdsmu.c34
-rw-r--r--sys/dev/amdsmu/amdsmu.h19
-rw-r--r--sys/dev/amdsmu/amdsmu_reg.h5
-rw-r--r--sys/dev/asmc/asmc.c785
-rw-r--r--sys/dev/asmc/asmcvar.h51
-rw-r--r--sys/dev/bce/if_bce.c2
-rw-r--r--sys/dev/bhnd/bhnd_bus_if.m2
-rw-r--r--sys/dev/clk/clk_fixed.c8
-rw-r--r--sys/dev/cxgbe/common/t4_hw.c19
-rw-r--r--sys/dev/cxgbe/crypto/t6_kern_tls.c6
-rw-r--r--sys/dev/cxgbe/crypto/t7_kern_tls.c2
-rw-r--r--sys/dev/cxgbe/cxgbei/cxgbei.c17
-rw-r--r--sys/dev/cxgbe/cxgbei/icl_cxgbei.c13
-rw-r--r--sys/dev/cxgbe/iw_cxgbe/qp.c24
-rw-r--r--sys/dev/cxgbe/nvmf/nvmf_che.c23
-rw-r--r--sys/dev/cxgbe/tom/t4_connect.c3
-rw-r--r--sys/dev/cxgbe/tom/t4_cpl_io.c44
-rw-r--r--sys/dev/cxgbe/tom/t4_ddp.c18
-rw-r--r--sys/dev/cxgbe/tom/t4_listen.c14
-rw-r--r--sys/dev/cxgbe/tom/t4_tls.c18
-rw-r--r--sys/dev/cxgbe/tom/t4_tom.c10
-rw-r--r--sys/dev/dpaa2/dpaa2_buf.c9
-rw-r--r--sys/dev/dpaa2/dpaa2_buf.h2
-rw-r--r--sys/dev/dpaa2/dpaa2_frame.c165
-rw-r--r--sys/dev/dpaa2/dpaa2_frame.h174
-rw-r--r--sys/dev/dpaa2/dpaa2_ni.c214
-rw-r--r--sys/dev/dpaa2/dpaa2_ni.h3
-rw-r--r--sys/dev/dpaa2/dpaa2_swp.h51
-rw-r--r--sys/dev/dpaa2/dpaa2_types.h5
-rw-r--r--sys/dev/dwc/if_dwc.c8
-rw-r--r--sys/dev/etherswitch/e6000sw/e6000sw.c6
-rw-r--r--sys/dev/etherswitch/e6000sw/e6000swreg.h1
-rw-r--r--sys/dev/evdev/evdev_utils.c11
-rw-r--r--sys/dev/hid/appleir.c440
-rw-r--r--sys/dev/hid/bcm5974.c80
-rw-r--r--sys/dev/hid/hgame.c16
-rw-r--r--sys/dev/hid/hid.h8
-rw-r--r--sys/dev/hid/hkbd.c133
-rw-r--r--sys/dev/hwpmc/hwpmc_amd.c51
-rw-r--r--sys/dev/hwpmc/hwpmc_ibs.c9
-rw-r--r--sys/dev/hwpmc/hwpmc_ibs.h24
-rw-r--r--sys/dev/hwpmc/hwpmc_intel.c6
-rw-r--r--sys/dev/hwpmc/hwpmc_logging.c35
-rw-r--r--sys/dev/hwpmc/hwpmc_mod.c60
-rw-r--r--sys/dev/hyperv/vmbus/x86/hyperv_reg.h6
-rw-r--r--sys/dev/ichwd/i6300esbwd.c3
-rw-r--r--sys/dev/iicbus/iic.c39
-rw-r--r--sys/dev/iicbus/iic.h8
-rw-r--r--sys/dev/irdma/fbsd_kcompat.c160
-rw-r--r--sys/dev/irdma/fbsd_kcompat.h19
-rw-r--r--sys/dev/irdma/ice_devids.h26
-rw-r--r--sys/dev/irdma/icrdma.c32
-rw-r--r--sys/dev/irdma/icrdma_hw.c59
-rw-r--r--sys/dev/irdma/icrdma_hw.h17
-rw-r--r--sys/dev/irdma/irdma.h4
-rw-r--r--sys/dev/irdma/irdma_cm.c493
-rw-r--r--sys/dev/irdma/irdma_cm.h3
-rw-r--r--sys/dev/irdma/irdma_ctrl.c273
-rw-r--r--sys/dev/irdma/irdma_defs.h136
-rw-r--r--sys/dev/irdma/irdma_hmc.c12
-rw-r--r--sys/dev/irdma/irdma_hw.c151
-rw-r--r--sys/dev/irdma/irdma_kcompat.c301
-rw-r--r--sys/dev/irdma/irdma_main.h41
-rw-r--r--sys/dev/irdma/irdma_pble.c10
-rw-r--r--sys/dev/irdma/irdma_protos.h7
-rw-r--r--sys/dev/irdma/irdma_puda.c25
-rw-r--r--sys/dev/irdma/irdma_puda.h14
-rw-r--r--sys/dev/irdma/irdma_type.h51
-rw-r--r--sys/dev/irdma/irdma_uda.h2
-rw-r--r--sys/dev/irdma/irdma_uda_d.h4
-rw-r--r--sys/dev/irdma/irdma_uk.c250
-rw-r--r--sys/dev/irdma/irdma_user.h26
-rw-r--r--sys/dev/irdma/irdma_utils.c376
-rw-r--r--sys/dev/irdma/irdma_verbs.c245
-rw-r--r--sys/dev/irdma/irdma_verbs.h25
-rw-r--r--sys/dev/irdma/irdma_ws.c157
-rw-r--r--sys/dev/irdma/osdep.h9
-rw-r--r--sys/dev/isci/scil/scic_sds_phy_registers.h2
-rw-r--r--sys/dev/isci/scil/scic_sds_remote_node_table.c4
-rw-r--r--sys/dev/isci/scil/scic_sds_stp_request.h2
-rw-r--r--sys/dev/isci/scil/scif_remote_device.h2
-rw-r--r--sys/dev/ixgbe/if_ix.c75
-rw-r--r--sys/dev/ixgbe/if_ix_mdio.c158
-rw-r--r--sys/dev/ixgbe/if_ix_mdio.h34
-rw-r--r--sys/dev/ixgbe/if_ix_mdio_hw.c181
-rw-r--r--sys/dev/ixgbe/if_ix_mdio_hw.h33
-rw-r--r--sys/dev/ixgbe/if_sriov.c2
-rw-r--r--sys/dev/ixgbe/ixgbe.h5
-rw-r--r--sys/dev/ixgbe/ixgbe_common.c4
-rw-r--r--sys/dev/ixgbe/ixgbe_e610.c534
-rw-r--r--sys/dev/ixgbe/ixgbe_e610.h13
-rw-r--r--sys/dev/ixgbe/ixgbe_features.h1
-rw-r--r--sys/dev/ixgbe/ixgbe_fw_logging.c467
-rw-r--r--sys/dev/ixgbe/ixgbe_osdep.c36
-rw-r--r--sys/dev/ixgbe/ixgbe_osdep.h3
-rw-r--r--sys/dev/ixgbe/ixgbe_sriov.h2
-rw-r--r--sys/dev/ixgbe/ixgbe_type_e610.h57
-rw-r--r--sys/dev/ixgbe/ixgbe_x540.c16
-rw-r--r--sys/dev/ixl/ixl_txrx.c2
-rw-r--r--sys/dev/mlx5/driver.h1
-rw-r--r--sys/dev/mlx5/mlx5_core/mlx5_cmd.c12
-rw-r--r--sys/dev/mlx5/mlx5_en/mlx5_en_main.c68
-rw-r--r--sys/dev/nvme/nvme_ahci.c3
-rw-r--r--sys/dev/nvme/nvme_ctrlr.c50
-rw-r--r--sys/dev/nvme/nvme_pci.c4
-rw-r--r--sys/dev/nvme/nvme_private.h19
-rw-r--r--sys/dev/nvme/nvme_qpair.c7
-rw-r--r--sys/dev/nvme/nvme_sim.c2
-rw-r--r--sys/dev/nvmf/controller/nvmft_controller.c2
-rw-r--r--sys/dev/ofw/openfirm.c4
-rw-r--r--sys/dev/ofw/openfirm.h2
-rw-r--r--sys/dev/pci/pcireg.h1
-rw-r--r--sys/dev/qcom_clk/qcom_clk_rcg2.c8
-rw-r--r--sys/dev/qcom_gcc/qcom_gcc_clock.c3
-rw-r--r--sys/dev/qcom_gcc/qcom_gcc_ipq4018_reset.c9
-rw-r--r--sys/dev/qcom_gcc/qcom_gcc_main.c22
-rw-r--r--sys/dev/qcom_gcc/qcom_gcc_msm8916.h41
-rw-r--r--sys/dev/qcom_gcc/qcom_gcc_msm8916_clock.c84
-rw-r--r--sys/dev/qcom_gcc/qcom_gcc_msm8916_reset.c71
-rw-r--r--sys/dev/qcom_gcc/qcom_gcc_var.h1
-rw-r--r--sys/dev/rge/if_rge.c113
-rw-r--r--sys/dev/rge/if_rge_hw.c53
-rw-r--r--sys/dev/rge/if_rge_hw.h1
-rw-r--r--sys/dev/rge/if_rge_sysctl.c10
-rw-r--r--sys/dev/rge/if_rgevar.h9
-rw-r--r--sys/dev/sound/midi/midi.c2
-rw-r--r--sys/dev/sound/pcm/ac97.c18
-rw-r--r--sys/dev/sound/pcm/buffer.c26
-rw-r--r--sys/dev/sound/pcm/channel.c159
-rw-r--r--sys/dev/sound/pcm/channel.h17
-rw-r--r--sys/dev/sound/pcm/dsp.c13
-rw-r--r--sys/dev/sound/pcm/feeder.c6
-rw-r--r--sys/dev/sound/pcm/feeder.h21
-rw-r--r--sys/dev/sound/pcm/feeder_chain.c12
-rw-r--r--sys/dev/sound/pcm/feeder_mixer.c19
-rw-r--r--sys/dev/sound/pcm/feeder_rate.c57
-rw-r--r--sys/dev/sound/pcm/feeder_volume.c23
-rw-r--r--sys/dev/sound/pcm/matrix.h13
-rw-r--r--sys/dev/sound/pcm/mixer.c4
-rw-r--r--sys/dev/sound/sndstat.c4
-rw-r--r--sys/dev/sound/usb/uaudio.c27
-rw-r--r--sys/dev/sound/usb/uaudio_pcm.c13
-rw-r--r--sys/dev/thunderbolt/nhi.c49
-rw-r--r--sys/dev/thunderbolt/nhi_pci.c121
-rw-r--r--sys/dev/thunderbolt/nhi_var.h18
-rw-r--r--sys/dev/thunderbolt/tb_pcib.c12
-rw-r--r--sys/dev/tpm/tpm20.c116
-rw-r--r--sys/dev/tpm/tpm20.h13
-rw-r--r--sys/dev/tpm/tpm_crb.c19
-rw-r--r--sys/dev/tpm/tpm_if.m5
-rw-r--r--sys/dev/tpm/tpm_tis_core.c28
-rw-r--r--sys/dev/uart/uart_bus_pci.c56
-rw-r--r--sys/dev/uart/uart_dev_ns8250.c9
-rw-r--r--sys/dev/ufshci/ufshci_acpi.c248
-rw-r--r--sys/dev/ufshci/ufshci_ctrlr.c71
-rw-r--r--sys/dev/ufshci/ufshci_dev.c12
-rw-r--r--sys/dev/ufshci/ufshci_pci.c3
-rw-r--r--sys/dev/ufshci/ufshci_private.h15
-rw-r--r--sys/dev/ufshci/ufshci_req_sdb.c40
-rw-r--r--sys/dev/ufshci/ufshci_sysctl.c35
-rw-r--r--sys/dev/usb/input/ukbd.c136
-rw-r--r--sys/dev/usb/input/wsp.c17
-rw-r--r--sys/dev/usb/net/if_smsc.c2
-rw-r--r--sys/dev/usb/net/if_ure.c69
-rw-r--r--sys/dev/usb/serial/uvscom.c3
-rw-r--r--sys/dev/usb/usbdevs7
-rw-r--r--sys/dev/virtio/block/virtio_blk.c94
-rw-r--r--sys/dev/virtio/pci/virtio_pci_modern.c2
-rw-r--r--sys/dev/virtio/virtqueue.c2
-rw-r--r--sys/dev/vmgenc/vmgenc_acpi.c2
-rw-r--r--sys/dev/vmm/vmm_dev.c16
-rw-r--r--sys/dev/vt/vt_core.c20
176 files changed, 7169 insertions, 2539 deletions
diff --git a/sys/dev/acpi_support/acpi_system76.c b/sys/dev/acpi_support/acpi_system76.c
index a4ac848a0fec..1ba287ccb85d 100644
--- a/sys/dev/acpi_support/acpi_system76.c
+++ b/sys/dev/acpi_support/acpi_system76.c
@@ -27,18 +27,19 @@
*/
#include "opt_acpi.h"
+
#include <sys/param.h>
-#include <sys/kernel.h>
#include <sys/bus.h>
+#include <sys/kernel.h>
#include <sys/module.h>
+#include <sys/sysctl.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <dev/acpica/acpivar.h>
-#include <sys/sysctl.h>
-
#include <dev/backlight/backlight.h>
+
#include "backlight_if.h"
#define _COMPONENT ACPI_OEM
@@ -91,8 +92,8 @@ static int acpi_system76_backlight_get_info(device_t dev,
enum {
S76_CTRL_KBB = 1, /* Keyboard Brightness */
S76_CTRL_KBC = 2, /* Keyboard Color */
- S76_CTRL_BCTL = 3, /* Battary Charging Start Thresholds */
- S76_CTRL_BCTH = 4, /* Battary Charging End Thresholds */
+ S76_CTRL_BCTL = 3, /* Battery Charging Start Thresholds */
+ S76_CTRL_BCTH = 4, /* Battery Charging End Thresholds */
};
#define S76_CTRL_MAX 5
@@ -125,16 +126,16 @@ static const struct s76_ctrl_table s76_sysctl_table[] = {
.desc = "Keyboard Color",
},
[S76_CTRL_BCTL] = {
- .name = "battary_thresholds_low",
+ .name = "battery_charge_min",
.get_method = S76_CTRL_GBCT,
.set_method = S76_CTRL_SBCT,
- .desc = "Battary charging start thresholds",
+ .desc = "Start charging the battery when this threshold is reached (percentage)",
},
[S76_CTRL_BCTH] = {
- .name = "battary_thresholds_high",
+ .name = "battery_charge_max",
.get_method = S76_CTRL_GBCT,
.set_method = S76_CTRL_SBCT,
- .desc = "Battary charging end thresholds",
+ .desc = "Stop charging the battery when this threshold is reached (percentage)",
},
};
@@ -376,7 +377,7 @@ acpi_system76_sysctl_handler(SYSCTL_HANDLER_ARGS)
if (req->newptr == NULL) {
/*
- * ACPI will not notify us if battary thresholds changes
+ * ACPI will not notify us if battery thresholds change
* outside this module. Therefore, always fetch those values.
*/
if (method != S76_CTRL_BCTL && method != S76_CTRL_BCTH)
diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c
index 01b584ec30aa..bdc197a4fb59 100644
--- a/sys/dev/acpica/acpi.c
+++ b/sys/dev/acpica/acpi.c
@@ -2131,44 +2131,64 @@ acpi_bus_get_prop(device_t bus, device_t child, const char *propname,
}
}
+static int
+acpi_device_pwr_for_sleep_sxd(device_t dev, ACPI_HANDLE handle, int state,
+ int *dstate)
+{
+ ACPI_STATUS status;
+ char sxd[8];
+
+ /* Note illegal _S0D is evaluated because some systems expect this. */
+ snprintf(sxd, sizeof(sxd), "_S%dD", state);
+ status = acpi_GetInteger(handle, sxd, dstate);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ device_printf(dev, "failed to get %s on %s: %s\n", sxd,
+ acpi_name(handle), AcpiFormatException(status));
+ return (ENXIO);
+ }
+ return (0);
+}
+
+/*
+ * Get the D-state we need to set the device to for entry into the sleep type
+ * we are currently entering (sc->acpi_stype is set in acpi_EnterSleepState
+ * before the ACPI bus gets suspended, and thus before this function is called).
+ *
+ * If entering s2idle, we will try to enter whichever D-state we would've been
+ * transitioning to in S3. If we are entering an ACPI S-state, we evaluate the
+ * relevant _SxD state instead (ACPI 7.3.16 - 7.3.19).
+ */
int
acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate)
{
- struct acpi_softc *sc;
- ACPI_HANDLE handle;
- ACPI_STATUS status;
- char sxd[8];
-
- handle = acpi_get_handle(dev);
+ struct acpi_softc *sc = device_get_softc(bus);
+ ACPI_HANDLE handle = acpi_get_handle(dev);
+ int state;
- /*
- * XXX If we find these devices, don't try to power them down.
- * The serial and IRDA ports on my T23 hang the system when
- * set to D3 and it appears that such legacy devices may
- * need special handling in their drivers.
- */
- if (dstate == NULL || handle == NULL ||
- acpi_MatchHid(handle, "PNP0500") ||
- acpi_MatchHid(handle, "PNP0501") ||
- acpi_MatchHid(handle, "PNP0502") ||
- acpi_MatchHid(handle, "PNP0510") ||
- acpi_MatchHid(handle, "PNP0511"))
- return (ENXIO);
+ if (dstate == NULL)
+ return (EINVAL);
- /*
- * Override next state with the value from _SxD, if present.
- * Note illegal _S0D is evaluated because some systems expect this.
- */
- sc = device_get_softc(bus);
- snprintf(sxd, sizeof(sxd), "_S%dD", acpi_stype_to_sstate(sc, sc->acpi_stype));
- status = acpi_GetInteger(handle, sxd, dstate);
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- device_printf(dev, "failed to get %s on %s: %s\n", sxd,
- acpi_name(handle), AcpiFormatException(status));
- return (ENXIO);
- }
+ /*
+ * XXX If we find these devices, don't try to power them down.
+ * The serial and IRDA ports on my T23 hang the system when
+ * set to D3 and it appears that such legacy devices may
+ * need special handling in their drivers.
+ */
+ if (handle == NULL ||
+ acpi_MatchHid(handle, "PNP0500") ||
+ acpi_MatchHid(handle, "PNP0501") ||
+ acpi_MatchHid(handle, "PNP0502") ||
+ acpi_MatchHid(handle, "PNP0510") ||
+ acpi_MatchHid(handle, "PNP0511"))
+ return (ENXIO);
- return (0);
+ if (sc->acpi_stype == POWER_STYPE_SUSPEND_TO_IDLE)
+ state = ACPI_STATE_S3;
+ else
+ state = acpi_stype_to_sstate(sc, sc->acpi_stype);
+ if (state == ACPI_STATE_UNKNOWN)
+ return (ENOENT);
+ return (acpi_device_pwr_for_sleep_sxd(bus, handle, state, dstate));
}
/* Callback arg for our implementation of walking the namespace. */
@@ -4608,9 +4628,9 @@ acpi_stype_sysctl(SYSCTL_HANDLER_ARGS)
sstate = acpi_sname_to_sstate(name);
if (sstate < 0)
return (EINVAL);
- printf("warning: this sysctl expects a sleep type, but an ACPI "
- "S-state has been passed to it. This functionality is "
- "deprecated; see acpi(4).\n");
+ printf("warning: the 'hw.acpi.%s' sysctl expects a sleep type, but "
+ "an ACPI S-state has been passed to it. This functionality "
+ "is deprecated; see acpi(4).\n", oidp->oid_name);
MPASS(sstate < ACPI_S_STATE_COUNT);
if (acpi_supported_sstates[sstate] == false)
return (EOPNOTSUPP);
diff --git a/sys/dev/acpica/acpi_spmc.c b/sys/dev/acpica/acpi_spmc.c
index 6b4363bb364d..03944800327d 100644
--- a/sys/dev/acpica/acpi_spmc.c
+++ b/sys/dev/acpica/acpi_spmc.c
@@ -1,7 +1,7 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2024-2025 The FreeBSD Foundation
+ * Copyright (c) 2024-2026 The FreeBSD Foundation
*
* This software was developed by Aymeric Wibo <obiwac@freebsd.org>
* under sponsorship from the FreeBSD Foundation.
@@ -45,6 +45,7 @@ enum intel_dsm_index {
/* Only for Microsoft DSM set. */
DSM_MODERN_ENTRY_NOTIF = 7,
DSM_MODERN_EXIT_NOTIF = 8,
+ DSM_MODERN_TURN_ON_DISPLAY = 9,
};
enum amd_dsm_index {
@@ -67,7 +68,9 @@ struct dsm_set {
const char *name;
int revision;
struct uuid uuid;
+ uint64_t dsms_supported;
uint64_t dsms_expected;
+ uint64_t extra_dsms;
};
static struct dsm_set intel_dsm_set = {
@@ -86,8 +89,9 @@ static struct dsm_set intel_dsm_set = {
0xc4eb40a0, 0x6cd2, 0x11e2, 0xbc, 0xfd,
{0x08, 0x00, 0x20, 0x0c, 0x9a, 0x66},
},
- .dsms_expected = DSM_GET_DEVICE_CONSTRAINTS | DSM_DISPLAY_OFF_NOTIF |
- DSM_DISPLAY_ON_NOTIF | DSM_ENTRY_NOTIF | DSM_EXIT_NOTIF,
+ .dsms_expected = (1 << DSM_GET_DEVICE_CONSTRAINTS) |
+ (1 << DSM_DISPLAY_OFF_NOTIF) | (1 << DSM_DISPLAY_ON_NOTIF) |
+ (1 << DSM_ENTRY_NOTIF) | (1 << DSM_EXIT_NOTIF),
};
SYSCTL_INT(_debug_acpi_spmc, OID_AUTO, intel_dsm_revision, CTLFLAG_RW,
@@ -102,9 +106,11 @@ static struct dsm_set ms_dsm_set = {
0x11e00d56, 0xce64, 0x47ce, 0x83, 0x7b,
{0x1f, 0x89, 0x8f, 0x9a, 0xa4, 0x61},
},
- .dsms_expected = DSM_DISPLAY_OFF_NOTIF | DSM_DISPLAY_ON_NOTIF |
- DSM_ENTRY_NOTIF | DSM_EXIT_NOTIF | DSM_MODERN_ENTRY_NOTIF |
- DSM_MODERN_EXIT_NOTIF,
+ .dsms_expected = (1 << DSM_DISPLAY_OFF_NOTIF) |
+ (1 << DSM_DISPLAY_ON_NOTIF) | (1 << DSM_ENTRY_NOTIF) |
+ (1 << DSM_EXIT_NOTIF) | (1 << DSM_MODERN_ENTRY_NOTIF) |
+ (1 << DSM_MODERN_EXIT_NOTIF),
+ .extra_dsms = (1 << DSM_MODERN_TURN_ON_DISPLAY),
};
static struct dsm_set amd_dsm_set = {
@@ -124,9 +130,9 @@ static struct dsm_set amd_dsm_set = {
0xe3f32452, 0xfebc, 0x43ce, 0x90, 0x39,
{0x93, 0x21, 0x22, 0xd3, 0x77, 0x21},
},
- .dsms_expected = AMD_DSM_GET_DEVICE_CONSTRAINTS | AMD_DSM_ENTRY_NOTIF |
- AMD_DSM_EXIT_NOTIF | AMD_DSM_DISPLAY_OFF_NOTIF |
- AMD_DSM_DISPLAY_ON_NOTIF,
+ .dsms_expected = (1 << AMD_DSM_GET_DEVICE_CONSTRAINTS) |
+ (1 << AMD_DSM_ENTRY_NOTIF) | (1 << AMD_DSM_EXIT_NOTIF) |
+ (1 << AMD_DSM_DISPLAY_OFF_NOTIF) | (1 << AMD_DSM_DISPLAY_ON_NOTIF),
};
SYSCTL_INT(_debug_acpi_spmc, OID_AUTO, amd_dsm_revision, CTLFLAG_RW,
@@ -188,13 +194,19 @@ acpi_spmc_probe(device_t dev)
if (ACPI_ID_PROBE(device_get_parent(dev), dev, spmc_ids, &name) > 0)
return (ENXIO);
- handle = acpi_get_handle(dev);
- if (handle == NULL)
+ if (device_get_unit(dev) > 0) {
+ device_printf(dev, "shouldn't have more than one SPMC");
return (ENXIO);
+ }
+
+ handle = acpi_get_handle(dev);
+ /* ACPI_ID_PROBE() above cannot succeed without a handle. */
+ MPASS(handle != NULL);
sc = device_get_softc(dev);
+ sc->dev = dev;
- /* Check which sets of DSM's are supported. */
+ /* Check which sets of DSMs are supported. */
sc->dsm_sets = 0;
acpi_spmc_check_dsm_set(sc, handle, &intel_dsm_set);
@@ -204,8 +216,8 @@ acpi_spmc_probe(device_t dev)
if (sc->dsm_sets == 0)
return (ENXIO);
- device_set_descf(dev, "Low Power S0 Idle (DSM sets 0x%x)",
- sc->dsm_sets);
+ device_set_descf(dev, "System Power Management Controller "
+ "(DSM sets 0x%x)", sc->dsm_sets);
return (0);
}
@@ -215,8 +227,6 @@ acpi_spmc_attach(device_t dev)
{
struct acpi_spmc_softc *sc = device_get_softc(dev);
- sc->dev = dev;
-
sc->handle = acpi_get_handle(dev);
if (sc->handle == NULL)
return (ENXIO);
@@ -252,8 +262,10 @@ static void
acpi_spmc_check_dsm_set(struct acpi_spmc_softc *sc, ACPI_HANDLE handle,
struct dsm_set *dsm_set)
{
- const uint64_t dsms_supported = acpi_DSMQuery(handle,
+ uint64_t dsms_supported = acpi_DSMQuery(handle,
(uint8_t *)&dsm_set->uuid, dsm_set->revision);
+ const uint64_t min_dsms = dsm_set->dsms_expected;
+ const uint64_t max_dsms = min_dsms | dsm_set->extra_dsms;
/*
* Check if DSM set supported at all. We do this by checking the
@@ -261,26 +273,28 @@ acpi_spmc_check_dsm_set(struct acpi_spmc_softc *sc, ACPI_HANDLE handle,
*/
if ((dsms_supported & 1) == 0)
return;
- if ((dsms_supported & dsm_set->dsms_expected)
- != dsm_set->dsms_expected) {
+ dsms_supported &= ~1;
+ dsm_set->dsms_supported = dsms_supported;
+ sc->dsm_sets |= dsm_set->flag;
+
+ if ((dsms_supported & min_dsms) != min_dsms)
device_printf(sc->dev, "DSM set %s does not support expected "
"DSMs (%#" PRIx64 " vs %#" PRIx64 "). "
"Some methods may fail.\n",
- dsm_set->name, dsms_supported, dsm_set->dsms_expected);
- }
- sc->dsm_sets |= dsm_set->flag;
+ dsm_set->name, dsms_supported, min_dsms);
+
+ if ((dsms_supported & ~max_dsms) != 0)
+ device_printf(sc->dev, "DSM set %s supports more DSMs than "
+ "expected (%#" PRIx64 " vs %#" PRIx64 ").\n", dsm_set->name,
+ dsms_supported, max_dsms);
}
static void
acpi_spmc_free_constraints(struct acpi_spmc_softc *sc)
{
- if (sc->constraints == NULL)
- return;
-
- for (size_t i = 0; i < sc->constraint_count; i++) {
- if (sc->constraints[i].name != NULL)
- free(sc->constraints[i].name, M_TEMP);
- }
+ for (size_t i = 0; i < sc->constraint_count; i++)
+ free(sc->constraints[i].name, M_TEMP);
+ sc->constraint_count = 0;
free(sc->constraints, M_TEMP);
sc->constraints = NULL;
@@ -597,6 +611,10 @@ acpi_spmc_exit_notif(device_t dev)
acpi_spmc_run_dsm(dev, &amd_dsm_set, AMD_DSM_EXIT_NOTIF);
if ((sc->dsm_sets & DSM_SET_MS) != 0) {
acpi_spmc_run_dsm(dev, &ms_dsm_set, DSM_EXIT_NOTIF);
+ if (ms_dsm_set.dsms_supported &
+ (1 << DSM_MODERN_TURN_ON_DISPLAY))
+ acpi_spmc_run_dsm(dev, &ms_dsm_set,
+ DSM_MODERN_TURN_ON_DISPLAY);
acpi_spmc_run_dsm(dev, &ms_dsm_set, DSM_MODERN_EXIT_NOTIF);
}
}
diff --git a/sys/dev/acpica/acpivar.h b/sys/dev/acpica/acpivar.h
index 1099e7a25b0a..7bcac6239253 100644
--- a/sys/dev/acpica/acpivar.h
+++ b/sys/dev/acpica/acpivar.h
@@ -507,6 +507,8 @@ acpi_d_state_to_str(int state)
const char *strs[ACPI_D_STATE_COUNT] = {"D0", "D1", "D2", "D3hot",
"D3cold"};
+ if (state == ACPI_STATE_UNKNOWN)
+ return ("unknown D-state");
MPASS(state >= ACPI_STATE_D0 && state <= ACPI_D_STATES_MAX);
return (strs[state]);
}
diff --git a/sys/dev/amdsmu/amdsmu.c b/sys/dev/amdsmu/amdsmu.c
index 9a6873b43517..7b97888887c5 100644
--- a/sys/dev/amdsmu/amdsmu.c
+++ b/sys/dev/amdsmu/amdsmu.c
@@ -58,9 +58,12 @@ amdsmu_identify(driver_t *driver, device_t parent)
static int
amdsmu_probe(device_t dev)
{
+ struct amdsmu_softc *sc;
+
if (resource_disabled("amdsmu", 0))
return (ENXIO);
- if (!amdsmu_match(device_get_parent(dev), NULL))
+ sc = device_get_softc(dev);
+ if (!amdsmu_match(device_get_parent(dev), &sc->product))
return (ENXIO);
device_set_descf(dev, "AMD System Management Unit");
@@ -154,28 +157,11 @@ static int
amdsmu_get_ip_blocks(device_t dev)
{
struct amdsmu_softc *sc = device_get_softc(dev);
- const uint16_t deviceid = pci_get_device(dev);
int err;
struct amdsmu_metrics *m = &sc->metrics;
bool active;
char sysctl_descr[32];
- /* Get IP block count. */
- switch (deviceid) {
- case PCI_DEVICEID_AMD_REMBRANDT_ROOT:
- sc->ip_block_count = 12;
- break;
- case PCI_DEVICEID_AMD_PHOENIX_ROOT:
- sc->ip_block_count = 21;
- break;
- /* TODO How many IP blocks does Strix Point (and the others) have? */
- case PCI_DEVICEID_AMD_STRIX_POINT_ROOT:
- default:
- sc->ip_block_count = nitems(amdsmu_ip_blocks_names);
- }
- KASSERT(sc->ip_block_count <= nitems(amdsmu_ip_blocks_names),
- ("too many IP blocks for array"));
-
/* Get and print out IP blocks. */
err = amdsmu_cmd(dev, SMU_MSG_GET_SUP_CONSTRAINTS, 0,
&sc->active_ip_blocks);
@@ -184,13 +170,13 @@ amdsmu_get_ip_blocks(device_t dev)
return (err);
}
device_printf(dev, "Active IP blocks: ");
- for (size_t i = 0; i < sc->ip_block_count; i++) {
+ for (size_t i = 0; i < sc->product->ip_block_count; i++) {
active = (sc->active_ip_blocks & (1 << i)) != 0;
sc->ip_blocks_active[i] = active;
if (!active)
continue;
printf("%s%s", amdsmu_ip_blocks_names[i],
- i + 1 < sc->ip_block_count ? " " : "\n");
+ i + 1 < sc->product->ip_block_count ? " " : "\n");
}
/* Create a sysctl node for IP blocks. */
@@ -203,7 +189,7 @@ amdsmu_get_ip_blocks(device_t dev)
}
/* Create a sysctl node for each IP block. */
- for (size_t i = 0; i < sc->ip_block_count; i++) {
+ for (size_t i = 0; i < sc->product->ip_block_count; i++) {
/* Create the sysctl node itself for the IP block. */
snprintf(sysctl_descr, sizeof sysctl_descr,
"Metrics about the %s AMD IP block",
@@ -293,7 +279,7 @@ amdsmu_fetch_idlemask(device_t dev)
{
struct amdsmu_softc *sc = device_get_softc(dev);
- sc->idlemask = amdsmu_read4(sc, SMU_REG_IDLEMASK);
+ sc->idlemask = amdsmu_read4(sc, sc->product->idlemask_reg);
}
static void
@@ -301,6 +287,10 @@ amdsmu_suspend(device_t dev, enum power_stype stype)
{
if (stype != POWER_STYPE_SUSPEND_TO_IDLE)
return;
+ /*
+ * XXX It seems that Cezanne needs a special workaround here for
+ * firmware versions < 64.53. See amd_pmc_verify_czn_rtc() in Linux.
+ */
if (amdsmu_cmd(dev, SMU_MSG_SLEEP_HINT, true, NULL) != 0)
device_printf(dev, "failed to hint to SMU to enter sleep");
}
diff --git a/sys/dev/amdsmu/amdsmu.h b/sys/dev/amdsmu/amdsmu.h
index 857fa21cba4e..4286d515ae77 100644
--- a/sys/dev/amdsmu/amdsmu.h
+++ b/sys/dev/amdsmu/amdsmu.h
@@ -25,10 +25,20 @@
static const struct amdsmu_product {
uint16_t amdsmu_vendorid;
uint16_t amdsmu_deviceid;
+ int16_t idlemask_reg;
+ size_t ip_block_count;
} amdsmu_products[] = {
- { CPU_VENDOR_AMD, PCI_DEVICEID_AMD_REMBRANDT_ROOT },
- { CPU_VENDOR_AMD, PCI_DEVICEID_AMD_PHOENIX_ROOT },
- { CPU_VENDOR_AMD, PCI_DEVICEID_AMD_STRIX_POINT_ROOT },
+ { CPU_VENDOR_AMD, PCI_DEVICEID_AMD_CEZANNE_ROOT,
+ SMU_REG_IDLEMASK_CEZANNE, 12 },
+ { CPU_VENDOR_AMD, PCI_DEVICEID_AMD_REMBRANDT_ROOT,
+ SMU_REG_IDLEMASK_PHOENIX, 12 },
+ { CPU_VENDOR_AMD, PCI_DEVICEID_AMD_PHOENIX_ROOT,
+ SMU_REG_IDLEMASK_PHOENIX, 21 },
+ /*
+ * XXX Strix Point (PCI_DEVICEID_AMD_STRIX_POINT_ROOT) doesn't support
+ * S0i3 and thus doesn't have an idlemask. Since our driver doesn't
+ * yet understand this, don't attach to Strix Point for the time being.
+ */
};
static const char *const amdsmu_ip_blocks_names[] = {
@@ -59,6 +69,8 @@ static const char *const amdsmu_ip_blocks_names[] = {
CTASSERT(nitems(amdsmu_ip_blocks_names) <= 32);
struct amdsmu_softc {
+ const struct amdsmu_product *product;
+
struct sysctl_ctx_list *sysctlctx;
struct sysctl_oid *sysctlnode;
@@ -76,7 +88,6 @@ struct amdsmu_softc {
uint32_t active_ip_blocks;
struct sysctl_oid *ip_blocks_sysctlnode;
- size_t ip_block_count;
struct sysctl_oid *ip_block_sysctlnodes[
nitems(amdsmu_ip_blocks_names)];
bool ip_blocks_active[
diff --git a/sys/dev/amdsmu/amdsmu_reg.h b/sys/dev/amdsmu/amdsmu_reg.h
index d45fa60941d5..6afbcf006535 100644
--- a/sys/dev/amdsmu/amdsmu_reg.h
+++ b/sys/dev/amdsmu/amdsmu_reg.h
@@ -16,6 +16,7 @@
* out? Also, there are way more of these. I couldn't find a centralized place
* which lists them though.
*/
+#define PCI_DEVICEID_AMD_CEZANNE_ROOT 0x1630
#define PCI_DEVICEID_AMD_REMBRANDT_ROOT 0x14B5
#define PCI_DEVICEID_AMD_PHOENIX_ROOT 0x14E8
#define PCI_DEVICEID_AMD_STRIX_POINT_ROOT 0x14A4
@@ -32,7 +33,9 @@
#define SMU_REG_MESSAGE 0x538
#define SMU_REG_RESPONSE 0x980
#define SMU_REG_ARGUMENT 0x9BC
-#define SMU_REG_IDLEMASK 0xD14
+
+#define SMU_REG_IDLEMASK_CEZANNE 0x94
+#define SMU_REG_IDLEMASK_PHOENIX 0xD14
enum amdsmu_res {
SMU_RES_WAIT = 0x00,
diff --git a/sys/dev/asmc/asmc.c b/sys/dev/asmc/asmc.c
index 17a282ce0b97..0a701e6fd663 100644
--- a/sys/dev/asmc/asmc.c
+++ b/sys/dev/asmc/asmc.c
@@ -58,6 +58,9 @@
#include <dev/acpica/acpivar.h>
#include <dev/asmc/asmcvar.h>
+#include <dev/backlight/backlight.h>
+#include "backlight_if.h"
+
/*
* Device interface.
*/
@@ -67,6 +70,15 @@ static int asmc_detach(device_t dev);
static int asmc_resume(device_t dev);
/*
+ * Backlight interface.
+ */
+static int asmc_backlight_update_status(device_t dev,
+ struct backlight_props *props);
+static int asmc_backlight_get_status(device_t dev,
+ struct backlight_props *props);
+static int asmc_backlight_get_info(device_t dev, struct backlight_info *info);
+
+/*
* SMC functions.
*/
static int asmc_init(device_t dev);
@@ -111,6 +123,22 @@ static int asmc_mbp_sysctl_light_control(SYSCTL_HANDLER_ARGS);
static int asmc_mbp_sysctl_light_left_10byte(SYSCTL_HANDLER_ARGS);
static int asmc_wol_sysctl(SYSCTL_HANDLER_ARGS);
+static int asmc_key_getinfo(device_t, const char *, uint8_t *, char *);
+
+#ifdef ASMC_DEBUG
+/* Raw key access */
+static int asmc_raw_key_sysctl(SYSCTL_HANDLER_ARGS);
+static int asmc_raw_value_sysctl(SYSCTL_HANDLER_ARGS);
+static int asmc_raw_len_sysctl(SYSCTL_HANDLER_ARGS);
+static int asmc_raw_type_sysctl(SYSCTL_HANDLER_ARGS);
+#endif
+
+/* Voltage/Current/Power/Light sensor support */
+static int asmc_sensor_read(device_t, const char *, int *);
+static int asmc_sensor_sysctl(SYSCTL_HANDLER_ARGS);
+static int asmc_detect_sensors(device_t);
+static int asmc_key_dump_by_index(device_t, int, char *, char *, uint8_t *);
+
struct asmc_model {
const char *smc_model; /* smbios.system.product env var. */
const char *smc_desc; /* driver description */
@@ -322,6 +350,12 @@ static const struct asmc_model asmc_models[] = {
ASMC_MBP115_TEMPS, ASMC_MBP115_TEMPNAMES, ASMC_MBP115_TEMPDESCS
},
+ {
+ "MacBookPro13,1", "Apple SMC MacBook Pro Retina Core i5 (late 2016, 13-inch)",
+ ASMC_SMS_FUNCS_DISABLED, ASMC_FAN_FUNCS2, ASMC_LIGHT_FUNCS,
+ ASMC_MBP131_TEMPS, ASMC_MBP131_TEMPNAMES, ASMC_MBP131_TEMPDESCS
+ },
+
/* The Mac Mini has no SMS */
{
"Macmini1,1", "Apple SMC Mac Mini",
@@ -581,6 +615,12 @@ static device_method_t asmc_methods[] = {
DEVMETHOD(device_attach, asmc_attach),
DEVMETHOD(device_detach, asmc_detach),
DEVMETHOD(device_resume, asmc_resume),
+
+ /* Backlight interface */
+ DEVMETHOD(backlight_update_status, asmc_backlight_update_status),
+ DEVMETHOD(backlight_get_status, asmc_backlight_get_status),
+ DEVMETHOD(backlight_get_info, asmc_backlight_get_info),
+
DEVMETHOD_END
};
@@ -606,8 +646,10 @@ static char *asmc_ids[] = { "APP0001", NULL };
static unsigned int light_control = 0;
+ACPI_PNP_INFO(asmc_ids);
DRIVER_MODULE(asmc, acpi, asmc_driver, NULL, NULL);
MODULE_DEPEND(asmc, acpi, 1, 1, 1);
+MODULE_DEPEND(asmc, backlight, 1, 1, 1);
static const struct asmc_model *
asmc_match(device_t dev)
@@ -799,8 +841,52 @@ asmc_attach(device_t dev)
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE,
dev, 0, model->smc_light_control, "I",
"Keyboard backlight brightness control");
+
+ sc->sc_kbd_bkl = backlight_register("asmc", dev);
+ if (sc->sc_kbd_bkl == NULL) {
+ device_printf(dev, "Can not register backlight\n");
+ ret = ENXIO;
+ goto err;
+ }
}
+#ifdef ASMC_DEBUG
+ /*
+ * Raw SMC key access for debugging.
+ */
+ sc->sc_raw_tree = SYSCTL_ADD_NODE(sysctlctx,
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+ "raw", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Raw SMC key access");
+
+ SYSCTL_ADD_PROC(sysctlctx,
+ SYSCTL_CHILDREN(sc->sc_raw_tree),
+ OID_AUTO, "key",
+ CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ dev, 0, asmc_raw_key_sysctl, "A",
+ "SMC key name (4 chars)");
+
+ SYSCTL_ADD_PROC(sysctlctx,
+ SYSCTL_CHILDREN(sc->sc_raw_tree),
+ OID_AUTO, "value",
+ CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ dev, 0, asmc_raw_value_sysctl, "A",
+ "SMC key value (hex string)");
+
+ SYSCTL_ADD_PROC(sysctlctx,
+ SYSCTL_CHILDREN(sc->sc_raw_tree),
+ OID_AUTO, "len",
+ CTLTYPE_U8 | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ dev, 0, asmc_raw_len_sysctl, "CU",
+ "SMC key value length");
+
+ SYSCTL_ADD_PROC(sysctlctx,
+ SYSCTL_CHILDREN(sc->sc_raw_tree),
+ OID_AUTO, "type",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ dev, 0, asmc_raw_type_sysctl, "A",
+ "SMC key type (4 chars)");
+#endif
+
if (model->smc_sms_x == NULL)
goto nosms;
@@ -881,6 +967,19 @@ asmc_detach(device_t dev)
{
struct asmc_softc *sc = device_get_softc(dev);
+ if (sc->sc_kbd_bkl != NULL)
+ backlight_destroy(sc->sc_kbd_bkl);
+
+ /* Free sensor key arrays */
+ for (int i = 0; i < sc->sc_voltage_count; i++)
+ free(sc->sc_voltage_sensors[i], M_DEVBUF);
+ for (int i = 0; i < sc->sc_current_count; i++)
+ free(sc->sc_current_sensors[i], M_DEVBUF);
+ for (int i = 0; i < sc->sc_power_count; i++)
+ free(sc->sc_power_sensors[i], M_DEVBUF);
+ for (int i = 0; i < sc->sc_light_count; i++)
+ free(sc->sc_light_sensors[i], M_DEVBUF);
+
if (sc->sc_sms_tq) {
taskqueue_drain(sc->sc_sms_tq, &sc->sc_sms_task);
taskqueue_free(sc->sc_sms_tq);
@@ -1052,6 +1151,12 @@ nosms:
sc->sc_nkeys = 0;
}
+ /*
+ * Auto-detect and register voltage/current/power/ambient sensors.
+ * Scans SMC keys and creates sysctls for detected sensors.
+ */
+ asmc_detect_sensors(dev);
+
out_err:
#ifdef ASMC_DEBUG
asmc_dumpall(dev);
@@ -1172,10 +1277,10 @@ static int
asmc_key_dump(device_t dev, int number)
{
struct asmc_softc *sc = device_get_softc(dev);
- char key[5] = { 0 };
- char type[7] = { 0 };
+ char key[ASMC_KEYLEN + 1] = { 0 };
+ char type[ASMC_KEYINFO_RESPLEN + 1] = { 0 };
uint8_t index[4];
- uint8_t v[32];
+ uint8_t v[ASMC_MAXVAL];
uint8_t maxlen;
int i, error = 1, try = 0;
@@ -1184,40 +1289,40 @@ asmc_key_dump(device_t dev, int number)
index[0] = (number >> 24) & 0xff;
index[1] = (number >> 16) & 0xff;
index[2] = (number >> 8) & 0xff;
- index[3] = (number) & 0xff;
+ index[3] = number & 0xff;
begin:
- if (asmc_command(dev, 0x12))
+ if (asmc_command(dev, ASMC_CMDGETBYINDEX))
goto out;
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < ASMC_KEYLEN; i++) {
ASMC_DATAPORT_WRITE(sc, index[i]);
- if (asmc_wait(dev, 0x04))
+ if (asmc_wait(dev, ASMC_STATUS_AWAIT_DATA))
goto out;
}
- ASMC_DATAPORT_WRITE(sc, 4);
+ ASMC_DATAPORT_WRITE(sc, ASMC_KEYLEN);
- for (i = 0; i < 4; i++) {
- if (asmc_wait(dev, 0x05))
+ for (i = 0; i < ASMC_KEYLEN; i++) {
+ if (asmc_wait(dev, ASMC_STATUS_DATA_READY))
goto out;
key[i] = ASMC_DATAPORT_READ(sc);
}
- /* get type */
- if (asmc_command(dev, 0x13))
+ /* Get key info (length + type). */
+ if (asmc_command(dev, ASMC_CMDGETINFO))
goto out;
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < ASMC_KEYLEN; i++) {
ASMC_DATAPORT_WRITE(sc, key[i]);
- if (asmc_wait(dev, 0x04))
+ if (asmc_wait(dev, ASMC_STATUS_AWAIT_DATA))
goto out;
}
- ASMC_DATAPORT_WRITE(sc, 6);
+ ASMC_DATAPORT_WRITE(sc, ASMC_KEYINFO_RESPLEN);
- for (i = 0; i < 6; i++) {
- if (asmc_wait(dev, 0x05))
+ for (i = 0; i < ASMC_KEYINFO_RESPLEN; i++) {
+ if (asmc_wait(dev, ASMC_STATUS_DATA_READY))
goto out;
type[i] = ASMC_DATAPORT_READ(sc);
}
@@ -1225,42 +1330,599 @@ begin:
error = 0;
out:
if (error) {
- if (++try < 10)
+ if (++try < ASMC_MAXRETRIES)
goto begin;
- device_printf(dev, "%s for key %s failed %d times, giving up\n",
- __func__, key, try);
- mtx_unlock_spin(&sc->sc_mtx);
+ device_printf(dev,
+ "%s for key %d failed %d times, giving up\n",
+ __func__, number, try);
+ }
+ mtx_unlock_spin(&sc->sc_mtx);
+
+ if (error)
+ return (error);
+
+ maxlen = type[0];
+ type[0] = ' ';
+ type[5] = '\0';
+ if (maxlen > sizeof(v))
+ maxlen = sizeof(v);
+
+ memset(v, 0, sizeof(v));
+ error = asmc_key_read(dev, key, v, maxlen);
+ if (error)
+ return (error);
+
+ device_printf(dev, "key %d: %s, type%s (len %d), data",
+ number, key, type, maxlen);
+ for (i = 0; i < maxlen; i++)
+ printf(" %02x", v[i]);
+ printf("\n");
+
+ return (0);
+}
+#endif /* ASMC_DEBUG */
+
+/*
+ * Get key info (length and type) from SMC using command 0x13.
+ * If len is non-NULL, stores the key's value length.
+ * If type is non-NULL, stores the 4-char type string (must be at least 5 bytes).
+ */
+static int
+asmc_key_getinfo(device_t dev, const char *key, uint8_t *len, char *type)
+{
+ struct asmc_softc *sc = device_get_softc(dev);
+ uint8_t info[ASMC_KEYINFO_RESPLEN];
+ int i, error = -1, try = 0;
+
+ mtx_lock_spin(&sc->sc_mtx);
+
+begin:
+ if (asmc_command(dev, ASMC_CMDGETINFO))
+ goto out;
+
+ for (i = 0; i < ASMC_KEYLEN; i++) {
+ ASMC_DATAPORT_WRITE(sc, key[i]);
+ if (asmc_wait(dev, ASMC_STATUS_AWAIT_DATA))
+ goto out;
+ }
+
+ ASMC_DATAPORT_WRITE(sc, ASMC_KEYINFO_RESPLEN);
+
+ for (i = 0; i < ASMC_KEYINFO_RESPLEN; i++) {
+ if (asmc_wait(dev, ASMC_STATUS_DATA_READY))
+ goto out;
+ info[i] = ASMC_DATAPORT_READ(sc);
+ }
+
+ error = 0;
+out:
+ if (error && ++try < ASMC_MAXRETRIES)
+ goto begin;
+ mtx_unlock_spin(&sc->sc_mtx);
+
+ if (error == 0) {
+ if (len != NULL)
+ *len = info[0];
+ if (type != NULL) {
+ for (i = 0; i < ASMC_TYPELEN; i++)
+ type[i] = info[i + 1];
+ type[ASMC_TYPELEN] = '\0';
+ }
+ }
+ return (error);
+}
+
+#ifdef ASMC_DEBUG
+/*
+ * Raw SMC key access sysctls - enables reading/writing any SMC key by name
+ * Usage:
+ * sysctl dev.asmc.0.raw.key=AUPO # Set key, auto-detects length
+ * sysctl dev.asmc.0.raw.value # Read current value (hex bytes)
+ * sysctl dev.asmc.0.raw.value=01 # Write new value
+ */
+static int
+asmc_raw_key_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ device_t dev = (device_t) arg1;
+ struct asmc_softc *sc = device_get_softc(dev);
+ char newkey[ASMC_KEYLEN + 1];
+ uint8_t keylen;
+ int error;
+
+ strlcpy(newkey, sc->sc_rawkey, sizeof(newkey));
+ error = sysctl_handle_string(oidp, newkey, sizeof(newkey), req);
+ if (error || req->newptr == NULL)
+ return (error);
+
+ if (strlen(newkey) != ASMC_KEYLEN)
+ return (EINVAL);
+
+ /* Get key info to auto-detect length and type */
+ if (asmc_key_getinfo(dev, newkey, &keylen, sc->sc_rawtype) != 0)
+ return (ENOENT);
+
+ if (keylen > ASMC_MAXVAL)
+ keylen = ASMC_MAXVAL;
+
+ strlcpy(sc->sc_rawkey, newkey, sizeof(sc->sc_rawkey));
+ sc->sc_rawlen = keylen;
+ memset(sc->sc_rawval, 0, sizeof(sc->sc_rawval));
+
+ /* Read the key value */
+ asmc_key_read(dev, sc->sc_rawkey, sc->sc_rawval, sc->sc_rawlen);
+
+ return (0);
+}
+
+static int
+asmc_raw_value_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ device_t dev = (device_t) arg1;
+ struct asmc_softc *sc = device_get_softc(dev);
+ char hexbuf[ASMC_MAXVAL * 2 + 1];
+ int error, i;
+
+ /* Refresh from SMC if a key has been selected. */
+ if (sc->sc_rawkey[0] != '\0') {
+ asmc_key_read(dev, sc->sc_rawkey, sc->sc_rawval,
+ sc->sc_rawlen > 0 ? sc->sc_rawlen : ASMC_MAXVAL);
+ }
+
+ /* Format as hex string */
+ for (i = 0; i < sc->sc_rawlen && i < ASMC_MAXVAL; i++)
+ snprintf(hexbuf + i * 2, 3, "%02x", sc->sc_rawval[i]);
+ hexbuf[i * 2] = '\0';
+
+ error = sysctl_handle_string(oidp, hexbuf, sizeof(hexbuf), req);
+ if (error || req->newptr == NULL)
+ return (error);
+
+ /* Reject writes until a key is selected via raw.key. */
+ if (sc->sc_rawkey[0] == '\0')
+ return (EINVAL);
+
+ memset(sc->sc_rawval, 0, sizeof(sc->sc_rawval));
+ for (i = 0; i < sc->sc_rawlen && hexbuf[i*2] && hexbuf[i*2+1]; i++) {
+ unsigned int val;
+ char tmp[3] = { hexbuf[i*2], hexbuf[i*2+1], 0 };
+ if (sscanf(tmp, "%02x", &val) == 1)
+ sc->sc_rawval[i] = (uint8_t)val;
+ }
+
+ if (asmc_key_write(dev, sc->sc_rawkey, sc->sc_rawval, sc->sc_rawlen) != 0)
+ return (EIO);
+
+ return (0);
+}
+
+static int
+asmc_raw_len_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ device_t dev = (device_t) arg1;
+ struct asmc_softc *sc = device_get_softc(dev);
+
+ return (sysctl_handle_8(oidp, &sc->sc_rawlen, 0, req));
+}
+
+static int
+asmc_raw_type_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ device_t dev = (device_t) arg1;
+ struct asmc_softc *sc = device_get_softc(dev);
+
+ return (sysctl_handle_string(oidp, sc->sc_rawtype,
+ sizeof(sc->sc_rawtype), req));
+}
+#endif
+
+/*
+ * Convert signed fixed-point SMC values to milli-units.
+ * Format "spXY": sign bit, X integer bits, Y fraction bits (hex digits).
+ */
+static int
+asmc_sp78_to_milli(const uint8_t *buf)
+{
+ int16_t val = (int16_t)be16dec(buf);
+
+ return ((int)val * 1000) / 256;
+}
+
+static int
+asmc_sp87_to_milli(const uint8_t *buf)
+{
+ int16_t val = (int16_t)be16dec(buf);
+
+ return ((int)val * 1000) / 128;
+}
+
+static int
+asmc_sp4b_to_milli(const uint8_t *buf)
+{
+ int16_t val = (int16_t)be16dec(buf);
+
+ return ((int)val * 1000) / 2048;
+}
+
+static int
+asmc_sp5a_to_milli(const uint8_t *buf)
+{
+ int16_t val = (int16_t)be16dec(buf);
+
+ return ((int)val * 1000) / 1024;
+}
+
+static int
+asmc_sp69_to_milli(const uint8_t *buf)
+{
+ int16_t val = (int16_t)be16dec(buf);
+
+ return ((int)val * 1000) / 512;
+}
+
+static int
+asmc_sp96_to_milli(const uint8_t *buf)
+{
+ int16_t val = (int16_t)be16dec(buf);
+
+ return ((int)val * 1000) / 64;
+}
+
+static int
+asmc_sp2d_to_milli(const uint8_t *buf)
+{
+ int16_t val = (int16_t)be16dec(buf);
+
+ return ((int)val * 1000) / 8192;
+}
+
+static bool
+asmc_sensor_type_supported(const char *type)
+{
+
+ return (strncmp(type, "sp78", 4) == 0 ||
+ strncmp(type, "sp87", 4) == 0 ||
+ strncmp(type, "sp4b", 4) == 0 ||
+ strncmp(type, "sp5a", 4) == 0 ||
+ strncmp(type, "sp69", 4) == 0 ||
+ strncmp(type, "sp96", 4) == 0 ||
+ strncmp(type, "sp2d", 4) == 0 ||
+ strncmp(type, "ui16", 4) == 0);
+}
+
+/*
+ * Generic sensor value reader with automatic type conversion.
+ * Reads an SMC key, detects its type, and converts to millivalue.
+ */
+static int
+asmc_sensor_read(device_t dev, const char *key, int *millivalue)
+{
+ uint8_t buf[2];
+ char type[ASMC_TYPELEN + 1];
+ uint8_t len;
+ int error;
+
+ error = asmc_key_getinfo(dev, key, &len, type);
+ if (error != 0)
+ return (error);
+
+ if (len != 2) {
+ if (bootverbose)
+ device_printf(dev,
+ "%s: key %s unexpected length %d\n",
+ __func__, key, len);
+ return (ENXIO);
+ }
+
+ error = asmc_key_read(dev, key, buf, sizeof(buf));
+ if (error != 0)
+ return (error);
+
+ if (strncmp(type, "sp78", 4) == 0) {
+ *millivalue = asmc_sp78_to_milli(buf);
+ } else if (strncmp(type, "sp87", 4) == 0) {
+ *millivalue = asmc_sp87_to_milli(buf);
+ } else if (strncmp(type, "sp4b", 4) == 0) {
+ *millivalue = asmc_sp4b_to_milli(buf);
+ } else if (strncmp(type, "sp5a", 4) == 0) {
+ *millivalue = asmc_sp5a_to_milli(buf);
+ } else if (strncmp(type, "sp69", 4) == 0) {
+ *millivalue = asmc_sp69_to_milli(buf);
+ } else if (strncmp(type, "sp96", 4) == 0) {
+ *millivalue = asmc_sp96_to_milli(buf);
+ } else if (strncmp(type, "sp2d", 4) == 0) {
+ *millivalue = asmc_sp2d_to_milli(buf);
+ } else if (strncmp(type, "ui16", 4) == 0) {
+ *millivalue = be16dec(buf);
} else {
- char buf[1024];
- char buf2[8];
- mtx_unlock_spin(&sc->sc_mtx);
- maxlen = type[0];
- type[0] = ' ';
- type[5] = 0;
- if (maxlen > sizeof(v)) {
+ if (bootverbose)
device_printf(dev,
- "WARNING: cropping maxlen from %d to %zu\n", maxlen,
- sizeof(v));
- maxlen = sizeof(v);
+ "%s: unknown type '%s' for key %s\n",
+ __func__, type, key);
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+/*
+ * Generic sensor sysctl handler for voltage/current/power/light sensors.
+ * arg2 encodes: sensor_type (high byte) | sensor_index (low byte)
+ * Sensor types: 'V'=voltage, 'I'=current, 'P'=power, 'L'=light
+ */
+static int
+asmc_sensor_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ device_t dev = (device_t) arg1;
+ struct asmc_softc *sc = device_get_softc(dev);
+ int error, val;
+ int sensor_type = (arg2 >> 8) & 0xFF;
+ int sensor_idx = arg2 & 0xFF;
+ const char *key = NULL;
+
+ /* Select sensor based on type and index */
+ switch (sensor_type) {
+ case 'V': /* Voltage */
+ if (sensor_idx < sc->sc_voltage_count)
+ key = sc->sc_voltage_sensors[sensor_idx];
+ break;
+ case 'I': /* Current */
+ if (sensor_idx < sc->sc_current_count)
+ key = sc->sc_current_sensors[sensor_idx];
+ break;
+ case 'P': /* Power */
+ if (sensor_idx < sc->sc_power_count)
+ key = sc->sc_power_sensors[sensor_idx];
+ break;
+ case 'L': /* Light */
+ if (sensor_idx < sc->sc_light_count)
+ key = sc->sc_light_sensors[sensor_idx];
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ if (key == NULL)
+ return (ENOENT);
+
+ error = asmc_sensor_read(dev, key, &val);
+ if (error != 0)
+ return (error);
+
+ return (sysctl_handle_int(oidp, &val, 0, req));
+}
+
+/*
+ * Detect and register voltage/current/power/ambient sensors.
+ * Scans all SMC keys and identifies sensor keys by prefix.
+ * Always returns 0; keys that fail to read are silently skipped.
+ */
+static int
+asmc_detect_sensors(device_t dev)
+{
+ struct asmc_softc *sc = device_get_softc(dev);
+ struct sysctl_ctx_list *sysctlctx;
+ struct sysctl_oid *tree_node;
+ char key[ASMC_KEYLEN + 1];
+ char type[ASMC_TYPELEN + 1];
+ uint8_t len;
+ unsigned int nkeys;
+ unsigned int i;
+ int error;
+ char *sensor_key;
+
+ /* Initialize counts */
+ sc->sc_voltage_count = 0;
+ sc->sc_current_count = 0;
+ sc->sc_power_count = 0;
+ sc->sc_light_count = 0;
+
+ if (sc->sc_nkeys == 0)
+ return (0);
+ nkeys = sc->sc_nkeys;
+
+ /* Scan all keys for voltage/current/power/ambient light sensors */
+ for (i = 0; i < nkeys; i++) {
+ /* Get key name by index */
+ error = asmc_key_dump_by_index(dev, i, key, type, &len);
+ if (error != 0)
+ continue;
+ if (!asmc_sensor_type_supported(type))
+ continue;
+
+ /* Voltage sensors (VC*, VD*, VG*, VP*, VI*) */
+ if (key[0] == 'V' && (key[1] == 'C' || key[1] == 'D' ||
+ key[1] == 'G' || key[1] == 'P' || key[1] == 'I') &&
+ len == 2) {
+ if (sc->sc_voltage_count >= ASMC_MAX_SENSORS)
+ continue;
+ sensor_key = malloc(ASMC_KEYLEN + 1,
+ M_DEVBUF, M_WAITOK);
+ memcpy(sensor_key, key, ASMC_KEYLEN + 1);
+ sc->sc_voltage_sensors[sc->sc_voltage_count++] =
+ sensor_key;
+ } else if (key[0] == 'I' && (key[1] == 'C' ||
+ key[1] == 'D' || key[1] == 'G' || key[1] == 'M' ||
+ key[1] == 'N' || key[1] == 'O' || key[1] == 'H' ||
+ key[1] == 'P' || key[1] == 'B' || key[1] == 'A' ||
+ key[1] == 'L') && len == 2) {
+ /* Current sensors */
+ if (sc->sc_current_count >= ASMC_MAX_SENSORS)
+ continue;
+ sensor_key = malloc(ASMC_KEYLEN + 1,
+ M_DEVBUF, M_WAITOK);
+ memcpy(sensor_key, key, ASMC_KEYLEN + 1);
+ sc->sc_current_sensors[sc->sc_current_count++] =
+ sensor_key;
+ } else if (key[0] == 'P' && (key[1] == 'C' ||
+ key[1] == 'D' || key[1] == 'N' || key[1] == 'S' ||
+ key[1] == 'T' || key[1] == 'H' || key[1] == 'F' ||
+ key[1] == 'Z' || key[1] == 'z') && len == 2) {
+ /* Power sensors */
+ if (sc->sc_power_count >= ASMC_MAX_SENSORS)
+ continue;
+ sensor_key = malloc(ASMC_KEYLEN + 1,
+ M_DEVBUF, M_WAITOK);
+ memcpy(sensor_key, key, ASMC_KEYLEN + 1);
+ sc->sc_power_sensors[sc->sc_power_count++] =
+ sensor_key;
+ } else if (key[0] == 'A' && key[1] == 'L' &&
+ (key[2] == 'V' || key[2] == 'S') && len == 2) {
+ /* Ambient light sensors */
+ if (sc->sc_light_count >= ASMC_MAX_SENSORS)
+ continue;
+ sensor_key = malloc(ASMC_KEYLEN + 1,
+ M_DEVBUF, M_WAITOK);
+ memcpy(sensor_key, key, ASMC_KEYLEN + 1);
+ sc->sc_light_sensors[sc->sc_light_count++] =
+ sensor_key;
}
- for (i = 0; i < sizeof(v); i++) {
- v[i] = 0;
+ }
+
+ if (bootverbose)
+ device_printf(dev,
+ "detected %d voltage, %d current, "
+ "%d power, %d light sensors\n",
+ sc->sc_voltage_count, sc->sc_current_count,
+ sc->sc_power_count, sc->sc_light_count);
+
+ /* Register sysctls for detected sensors */
+ sysctlctx = device_get_sysctl_ctx(dev);
+
+ /* Voltage sensors */
+ if (sc->sc_voltage_count > 0) {
+ tree_node = SYSCTL_ADD_NODE(sysctlctx,
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+ "voltage", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Voltage sensors (millivolts)");
+
+ for (i = 0; i < sc->sc_voltage_count; i++) {
+ SYSCTL_ADD_PROC(sysctlctx, SYSCTL_CHILDREN(tree_node),
+ OID_AUTO, sc->sc_voltage_sensors[i],
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ dev, ('V' << 8) | i, asmc_sensor_sysctl, "I",
+ "Voltage sensor (millivolts)");
}
- asmc_key_read(dev, key, v, maxlen);
- snprintf(buf, sizeof(buf),
- "key %d is: %s, type %s (len %d), data",
- number, key, type, maxlen);
- for (i = 0; i < maxlen; i++) {
- snprintf(buf2, sizeof(buf2), " %02x", v[i]);
- strlcat(buf, buf2, sizeof(buf));
+ }
+
+ /* Current sensors */
+ if (sc->sc_current_count > 0) {
+ tree_node = SYSCTL_ADD_NODE(sysctlctx,
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+ "current", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Current sensors (milliamps)");
+
+ for (i = 0; i < sc->sc_current_count; i++) {
+ SYSCTL_ADD_PROC(sysctlctx, SYSCTL_CHILDREN(tree_node),
+ OID_AUTO, sc->sc_current_sensors[i],
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ dev, ('I' << 8) | i, asmc_sensor_sysctl, "I",
+ "Current sensor (milliamps)");
+ }
+ }
+
+ /* Power sensors */
+ if (sc->sc_power_count > 0) {
+ tree_node = SYSCTL_ADD_NODE(sysctlctx,
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+ "power", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Power sensors (milliwatts)");
+
+ for (i = 0; i < sc->sc_power_count; i++) {
+ SYSCTL_ADD_PROC(sysctlctx, SYSCTL_CHILDREN(tree_node),
+ OID_AUTO, sc->sc_power_sensors[i],
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ dev, ('P' << 8) | i, asmc_sensor_sysctl, "I",
+ "Power sensor (milliwatts)");
+ }
+ }
+
+ /* Ambient light sensors */
+ if (sc->sc_light_count > 0) {
+ tree_node = SYSCTL_ADD_NODE(sysctlctx,
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+ "ambient", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Ambient light sensors");
+
+ for (i = 0; i < sc->sc_light_count; i++) {
+ SYSCTL_ADD_PROC(sysctlctx, SYSCTL_CHILDREN(tree_node),
+ OID_AUTO, sc->sc_light_sensors[i],
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ dev, ('L' << 8) | i, asmc_sensor_sysctl, "I",
+ "Light sensor value");
}
- strlcat(buf, " \n", sizeof(buf));
- device_printf(dev, "%s", buf);
}
+ return (0);
+}
+
+/*
+ * Helper to look up a key name by index, then its type and length (for sensor detection).
+ */
+static int
+asmc_key_dump_by_index(device_t dev, int index, char *key_out,
+ char *type_out, uint8_t *len_out)
+{
+ struct asmc_softc *sc = device_get_softc(dev);
+ uint8_t index_buf[ASMC_KEYLEN];
+ uint8_t key_buf[ASMC_KEYLEN];
+ uint8_t info_buf[ASMC_KEYINFO_RESPLEN];
+ int error = ENXIO, try = 0;
+ int i;
+
+ mtx_lock_spin(&sc->sc_mtx);
+
+ index_buf[0] = (index >> 24) & 0xff;
+ index_buf[1] = (index >> 16) & 0xff;
+ index_buf[2] = (index >> 8) & 0xff;
+ index_buf[3] = index & 0xff;
+
+begin:
+ if (asmc_command(dev, ASMC_CMDGETBYINDEX))
+ goto out;
+
+ for (i = 0; i < ASMC_KEYLEN; i++) {
+ ASMC_DATAPORT_WRITE(sc, index_buf[i]);
+ if (asmc_wait(dev, ASMC_STATUS_AWAIT_DATA))
+ goto out;
+ }
+
+ ASMC_DATAPORT_WRITE(sc, ASMC_KEYLEN);
+
+ for (i = 0; i < ASMC_KEYLEN; i++) {
+ if (asmc_wait(dev, ASMC_STATUS_DATA_READY))
+ goto out;
+ key_buf[i] = ASMC_DATAPORT_READ(sc);
+ }
+
+ if (asmc_command(dev, ASMC_CMDGETINFO))
+ goto out;
+
+ for (i = 0; i < ASMC_KEYLEN; i++) {
+ ASMC_DATAPORT_WRITE(sc, key_buf[i]);
+ if (asmc_wait(dev, ASMC_STATUS_AWAIT_DATA))
+ goto out;
+ }
+
+ ASMC_DATAPORT_WRITE(sc, ASMC_KEYINFO_RESPLEN);
+
+ for (i = 0; i < ASMC_KEYINFO_RESPLEN; i++) {
+ if (asmc_wait(dev, ASMC_STATUS_DATA_READY))
+ goto out;
+ info_buf[i] = ASMC_DATAPORT_READ(sc);
+ }
+
+ memcpy(key_out, key_buf, ASMC_KEYLEN);
+ key_out[ASMC_KEYLEN] = '\0';
+ *len_out = info_buf[0];
+ memcpy(type_out, &info_buf[1], ASMC_TYPELEN);
+ type_out[ASMC_TYPELEN] = '\0';
+ error = 0;
+
+out:
+ if (error) {
+ if (++try < ASMC_MAXRETRIES)
+ goto begin;
+ }
+
+ mtx_unlock_spin(&sc->sc_mtx);
return (error);
}
-#endif
static int
asmc_key_write(device_t dev, const char *key, uint8_t *buf, uint8_t len)
@@ -1738,6 +2400,7 @@ static int
asmc_mbp_sysctl_light_control(SYSCTL_HANDLER_ARGS)
{
device_t dev = (device_t)arg1;
+ struct asmc_softc *sc = device_get_softc(dev);
uint8_t buf[2];
int error;
int v;
@@ -1749,6 +2412,7 @@ asmc_mbp_sysctl_light_control(SYSCTL_HANDLER_ARGS)
if (v < 0 || v > 255)
return (EINVAL);
light_control = v;
+ sc->sc_kbd_bkl_level = v * 100 / 255;
buf[0] = light_control;
buf[1] = 0x00;
asmc_key_write(dev, ASMC_KEY_LIGHTVALUE, buf, sizeof(buf));
@@ -1816,3 +2480,38 @@ asmc_wol_sysctl(SYSCTL_HANDLER_ARGS)
return (0);
}
+
+static int
+asmc_backlight_update_status(device_t dev, struct backlight_props *props)
+{
+ struct asmc_softc *sc = device_get_softc(dev);
+ uint8_t buf[2];
+
+ sc->sc_kbd_bkl_level = props->brightness;
+ light_control = props->brightness * 255 / 100;
+ buf[0] = light_control;
+ buf[1] = 0x00;
+ asmc_key_write(dev, ASMC_KEY_LIGHTVALUE, buf, sizeof(buf));
+
+ return (0);
+}
+
+static int
+asmc_backlight_get_status(device_t dev, struct backlight_props *props)
+{
+ struct asmc_softc *sc = device_get_softc(dev);
+
+ props->brightness = sc->sc_kbd_bkl_level;
+ props->nlevels = 0;
+
+ return (0);
+}
+
+static int
+asmc_backlight_get_info(device_t dev, struct backlight_info *info)
+{
+ info->type = BACKLIGHT_TYPE_KEYBOARD;
+ strlcpy(info->name, "Apple MacBook Keyboard", BACKLIGHTMAXNAMELENGTH);
+
+ return (0);
+}
diff --git a/sys/dev/asmc/asmcvar.h b/sys/dev/asmc/asmcvar.h
index 95a117f59533..43f679f3fef0 100644
--- a/sys/dev/asmc/asmcvar.h
+++ b/sys/dev/asmc/asmcvar.h
@@ -28,6 +28,10 @@
*/
#define ASMC_MAXFANS 6
+#define ASMC_MAXVAL 32 /* Maximum SMC value size */
+#define ASMC_KEYLEN 4 /* SMC key name length */
+#define ASMC_TYPELEN 4 /* SMC type string length */
+#define ASMC_MAX_SENSORS 64 /* Max sensors per type */
struct asmc_softc {
device_t sc_dev;
@@ -51,6 +55,25 @@ struct asmc_softc {
struct taskqueue *sc_sms_tq;
struct task sc_sms_task;
uint8_t sc_sms_intr_works;
+ struct cdev *sc_kbd_bkl;
+ uint32_t sc_kbd_bkl_level;
+#ifdef ASMC_DEBUG
+ /* Raw key access */
+ struct sysctl_oid *sc_raw_tree;
+ char sc_rawkey[ASMC_KEYLEN + 1];
+ uint8_t sc_rawval[ASMC_MAXVAL];
+ uint8_t sc_rawlen;
+ char sc_rawtype[ASMC_TYPELEN + 1];
+#endif
+ /* Voltage/Current/Power/Light sensors */
+ char *sc_voltage_sensors[ASMC_MAX_SENSORS];
+ int sc_voltage_count;
+ char *sc_current_sensors[ASMC_MAX_SENSORS];
+ int sc_current_count;
+ char *sc_power_sensors[ASMC_MAX_SENSORS];
+ int sc_power_count;
+ char *sc_light_sensors[ASMC_MAX_SENSORS];
+ int sc_light_count;
};
/*
@@ -69,6 +92,14 @@ struct asmc_softc {
bus_write_1(sc->sc_ioport, 0x04, val)
#define ASMC_CMDREAD 0x10
#define ASMC_CMDWRITE 0x11
+#define ASMC_CMDGETBYINDEX 0x12
+#define ASMC_CMDGETINFO 0x13
+
+#define ASMC_STATUS_AWAIT_DATA 0x04
+#define ASMC_STATUS_DATA_READY 0x05
+
+#define ASMC_KEYINFO_RESPLEN 6 /* getinfo: 1 len + 4 type + 1 attr */
+#define ASMC_MAXRETRIES 10
/*
* Interrupt port.
@@ -88,7 +119,7 @@ struct asmc_softc {
#define ASMC_KEY_FANMANUAL "FS! " /* RW; 2 bytes */
#define ASMC_KEY_FANID "F%dID" /* RO; 16 bytes */
#define ASMC_KEY_FANSPEED "F%dAc" /* RO; 2 bytes */
-#define ASMC_KEY_FANMINSPEED "F%dMn" /* RO; 2 bytes */
+#define ASMC_KEY_FANMINSPEED "F%dMn" /* RW; 2 bytes */
#define ASMC_KEY_FANMAXSPEED "F%dMx" /* RO; 2 bytes */
#define ASMC_KEY_FANSAFESPEED "F%dSf" /* RO; 2 bytes */
#define ASMC_KEY_FANTARGETSPEED "F%dTg" /* RW; 2 bytes */
@@ -543,6 +574,24 @@ struct asmc_softc {
"Pbus", "Ambient Light", "Leftside", "Rightside", "CPU Package Core", \
"CPU Package GPU", "CPU Package Total", "System Total", "DC In" }
+#define ASMC_MBP131_TEMPS { "TB0T", "TB1T", "TB2T", "TC0F", \
+ "TC0P", "TC1C", "TC2C", "TCGC", \
+ "TCSA", "TCXC", "Th1H", "TM0P", \
+ "TPCD", "Ts0P", "Ts0S", "TaLC", \
+ "Ts1P", NULL }
+
+#define ASMC_MBP131_TEMPNAMES { "battery", "battery_1", "battery_2", "cpu_die_peci", \
+ "cpu_proximity", "cpu_core_1", "cpu_core_2", "intel_gpu", \
+ "cpu_sys_agent", "cpu_core_peci", "right_fin_stack", "memory_proximity", \
+ "platform_ctrl_hub", "trackpad", "bottom_skin", "air_flow", \
+ "trackpad_act" }
+
+#define ASMC_MBP131_TEMPDESCS { "Battery", "Battery Sensor 1", "Battery Sensor 2", "CPU Die (PECI)", \
+ "CPU Proximity", "CPU Core 1", "CPU Core 2", "Intel GPU", \
+ "CPU System Agent Core (PECI)", "CPU Core (PECI)", "Right Fin Stack", "DDR3 Proximity", \
+ "Platform Controller Hub Die", "Trackpad", "Bottom Skin", "Air Flow", \
+ "Trackpad Actuator" }
+
#define ASMC_MM_TEMPS { "TN0P", "TN1P", NULL }
#define ASMC_MM_TEMPNAMES { "northbridge1", "northbridge2" }
#define ASMC_MM_TEMPDESCS { "Northbridge Point 1", \
diff --git a/sys/dev/bce/if_bce.c b/sys/dev/bce/if_bce.c
index 6cf39e035ea6..84992af0c6b8 100644
--- a/sys/dev/bce/if_bce.c
+++ b/sys/dev/bce/if_bce.c
@@ -8861,7 +8861,7 @@ bce_sysctl_nvram_write(SYSCTL_HANDLER_ARGS)
bzero(sc->nvram_buf, sc->bce_flash_size);
error = SYSCTL_IN(req, sc->nvram_buf, sc->bce_flash_size);
- if (error == 0)
+ if (error != 0)
return (error);
if (req->newlen == sc->bce_flash_size)
diff --git a/sys/dev/bhnd/bhnd_bus_if.m b/sys/dev/bhnd/bhnd_bus_if.m
index 6ae56ceeb196..7060f944c99b 100644
--- a/sys/dev/bhnd/bhnd_bus_if.m
+++ b/sys/dev/bhnd/bhnd_bus_if.m
@@ -533,7 +533,7 @@ METHOD int read_board_info {
/**
* Notify a bhnd bus that a child was added.
*
- * This method must be called by concrete bhnd(4) driver impementations
+ * This method must be called by concrete bhnd(4) driver implementations
* after @p child's bus state is fully initialized.
*
* @param dev The bhnd bus whose child is being added.
diff --git a/sys/dev/clk/clk_fixed.c b/sys/dev/clk/clk_fixed.c
index f8dcfb8378cd..6656aeec473e 100644
--- a/sys/dev/clk/clk_fixed.c
+++ b/sys/dev/clk/clk_fixed.c
@@ -157,9 +157,11 @@ clk_fixed_probe(device_t dev)
clk_type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
switch (clk_type) {
case CLK_TYPE_FIXED:
- if (OF_hasprop(ofw_bus_get_node(dev), "clock-frequency") == 0) {
- device_printf(dev,
- "clock-fixed has no clock-frequency\n");
+ if (!OF_hasprop(ofw_bus_get_node(dev), "clock-frequency")) {
+ if (bootverbose) {
+ device_printf(dev,
+ "clock-fixed has no clock-frequency\n");
+ }
return (ENXIO);
}
device_set_desc(dev, "Fixed clock");
diff --git a/sys/dev/cxgbe/common/t4_hw.c b/sys/dev/cxgbe/common/t4_hw.c
index 494f83a47135..f4eef54e5c6b 100644
--- a/sys/dev/cxgbe/common/t4_hw.c
+++ b/sys/dev/cxgbe/common/t4_hw.c
@@ -4797,7 +4797,6 @@ struct intr_info {
static inline char
intr_alert_char(u32 cause, u32 enable, u32 fatal)
{
-
if (cause & fatal)
return ('!');
if (cause & enable)
@@ -4817,7 +4816,7 @@ show_intr_info(struct adapter *sc, const struct intr_info *ii, uint32_t cause,
if (verbose || ucause != 0 || flags & IHF_RUN_ALL_ACTIONS) {
alert = intr_alert_char(cause, enabled, fatal);
CH_ALERT(sc, "%c %s 0x%x = 0x%08x, E 0x%08x, F 0x%08x\n", alert,
- ii->name, ii->cause_reg, cause, enabled, fatal);
+ ii->name, ii->cause_reg, cause, enabled, ii->fatal);
}
leftover = verbose ? cause : ucause;
@@ -4829,8 +4828,10 @@ show_intr_info(struct adapter *sc, const struct intr_info *ii, uint32_t cause,
CH_ALERT(sc, " %c [0x%08x] %s\n", alert, msgbits, details->msg);
leftover &= ~msgbits;
}
- if (leftover != 0 && leftover != (verbose ? cause : ucause))
- CH_ALERT(sc, " ? [0x%08x]\n", leftover);
+ if (leftover != 0 && leftover != (verbose ? cause : ucause)) {
+ alert = intr_alert_char(leftover, enabled, fatal);
+ CH_ALERT(sc, " %c [0x%08x]\n", alert, leftover);
+ }
}
/*
@@ -6102,6 +6103,14 @@ static bool mem_intr_handler(struct adapter *adap, int idx, int flags)
{ F_PERR_INT_CAUSE, "FIFO parity error" },
{ 0 }
};
+ static const struct intr_details t7_mem_intr_details[] = {
+ { F_DDRPHY_INT_CAUSE, "DDRPHY" },
+ { F_DDRCTL_INT_CAUSE, "DDRCTL" },
+ { F_T7_ECC_CE_INT_CAUSE, "Correctable ECC data error(s)" },
+ { F_T7_ECC_UE_INT_CAUSE, "Uncorrectable ECC data error(s)" },
+ { F_PERR_INT_CAUSE, "FIFO parity error" },
+ { 0 }
+ };
char rname[32];
struct intr_info ii = {
.name = &rname[0],
@@ -6156,6 +6165,8 @@ static bool mem_intr_handler(struct adapter *adap, int idx, int flags)
} else {
ii.cause_reg = MC_T7_REG(A_T7_MC_P_INT_CAUSE, i);
ii.enable_reg = MC_T7_REG(A_T7_MC_P_INT_ENABLE, i);
+ ii.fatal = F_PERR_INT_CAUSE | F_T7_ECC_UE_INT_CAUSE;
+ ii.details = t7_mem_intr_details;
count_reg = MC_T7_REG(A_T7_MC_P_ECC_STATUS, i);
}
fatal |= t4_handle_intr(adap, &ii, 0, flags);
diff --git a/sys/dev/cxgbe/crypto/t6_kern_tls.c b/sys/dev/cxgbe/crypto/t6_kern_tls.c
index 454b2e264a0e..584e5015acfa 100644
--- a/sys/dev/cxgbe/crypto/t6_kern_tls.c
+++ b/sys/dev/cxgbe/crypto/t6_kern_tls.c
@@ -458,15 +458,15 @@ t6_tls_tag_alloc(if_t ifp, union if_snd_tag_alloc_params *params,
}
inp = params->tls.inp;
+ tp = intotcpcb(inp);
INP_RLOCK(inp);
- if (inp->inp_flags & INP_DROPPED) {
+ if (tp->t_flags & TF_DISCONNECTED) {
INP_RUNLOCK(inp);
error = ECONNRESET;
goto failed;
}
tlsp->inp = inp;
- tp = intotcpcb(inp);
if (tp->t_flags & TF_REQ_TSTMP) {
tlsp->using_timestamps = true;
if ((tp->ts_offset & 0xfffffff) != 0) {
@@ -501,7 +501,7 @@ t6_tls_tag_alloc(if_t ifp, union if_snd_tag_alloc_params *params,
goto failed;
}
- if (inp->inp_flags & INP_DROPPED) {
+ if (tp->t_flags & TF_DISCONNECTED) {
INP_RUNLOCK(inp);
error = ECONNRESET;
goto failed;
diff --git a/sys/dev/cxgbe/crypto/t7_kern_tls.c b/sys/dev/cxgbe/crypto/t7_kern_tls.c
index d9710b5bd13f..b6078b9b53b6 100644
--- a/sys/dev/cxgbe/crypto/t7_kern_tls.c
+++ b/sys/dev/cxgbe/crypto/t7_kern_tls.c
@@ -246,7 +246,7 @@ t7_tls_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
inp = params->tls.inp;
INP_RLOCK(inp);
- if (inp->inp_flags & INP_DROPPED) {
+ if (intotcpcb(inp)->t_flags & TF_DISCONNECTED) {
INP_RUNLOCK(inp);
error = ECONNRESET;
goto failed;
diff --git a/sys/dev/cxgbe/cxgbei/cxgbei.c b/sys/dev/cxgbe/cxgbei/cxgbei.c
index ccca45f5f761..4b341c9d37b2 100644
--- a/sys/dev/cxgbe/cxgbei/cxgbei.c
+++ b/sys/dev/cxgbe/cxgbei/cxgbei.c
@@ -499,10 +499,11 @@ do_rx_iscsi_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
toep->ofld_rxq->rx_iscsi_ddp_octets += ip->ip_data_len;
}
+ tp = intotcpcb(inp);
INP_WLOCK(inp);
- if (__predict_false(inp->inp_flags & INP_DROPPED)) {
- CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
- __func__, tid, pdu_len, inp->inp_flags);
+ if (__predict_false(tp->t_flags & TF_DISCONNECTED)) {
+ CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), t_flags 0x%x",
+ __func__, tid, pdu_len, tp->t_flags);
INP_WUNLOCK(inp);
icl_cxgbei_conn_pdu_free(NULL, ip);
toep->ulpcb2 = NULL;
@@ -513,7 +514,6 @@ do_rx_iscsi_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
* T6+ does not report data PDUs received via DDP without F
* set. This can result in gaps in the TCP sequence space.
*/
- tp = intotcpcb(inp);
MPASS(chip_id(sc) >= CHELSIO_T6 || icp->icp_seq == tp->rcv_nxt);
tp->rcv_nxt = icp->icp_seq + pdu_len;
tp->t_rcvtime = ticks;
@@ -652,10 +652,11 @@ do_rx_iscsi_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
toep->ofld_rxq->rx_iscsi_data_digest_errors++;
}
+ tp = intotcpcb(inp);
INP_WLOCK(inp);
- if (__predict_false(inp->inp_flags & INP_DROPPED)) {
- CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
- __func__, tid, pdu_len, inp->inp_flags);
+ if (__predict_false(tp->t_flags & TF_DISCONNECTED)) {
+ CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), t_flags 0x%x",
+ __func__, tid, pdu_len, tp->t_flags);
INP_WUNLOCK(inp);
icl_cxgbei_conn_pdu_free(NULL, ip);
toep->ulpcb2 = NULL;
@@ -663,8 +664,6 @@ do_rx_iscsi_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
return (0);
}
- tp = intotcpcb(inp);
-
/*
* If icc is NULL, the connection is being closed in
* icl_cxgbei_conn_close(), just drop this data.
diff --git a/sys/dev/cxgbe/cxgbei/icl_cxgbei.c b/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
index d90d7904a8ae..2e7767a0fc27 100644
--- a/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
+++ b/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
@@ -434,6 +434,7 @@ icl_cxgbei_tx_main(void *arg)
struct toepcb *toep = icc->toep;
struct socket *so = ic->ic_socket;
struct inpcb *inp = sotoinpcb(so);
+ struct tcpcb *tp = intotcpcb(inp);
struct icl_pdu *ip;
struct mbuf *m;
struct mbufq mq;
@@ -476,7 +477,7 @@ icl_cxgbei_tx_main(void *arg)
INP_WLOCK(inp);
ICL_CONN_UNLOCK(ic);
- if (__predict_false(inp->inp_flags & INP_DROPPED) ||
+ if (__predict_false(tp->t_flags & TF_DISCONNECTED) ||
__predict_false((toep->flags & TPF_ATTACHED) == 0)) {
mbufq_drain(&mq);
} else {
@@ -1080,7 +1081,7 @@ icl_cxgbei_conn_handoff(struct icl_conn *ic, int fd)
inp = sotoinpcb(so);
INP_WLOCK(inp);
tp = intotcpcb(inp);
- if (inp->inp_flags & INP_DROPPED) {
+ if (tp->t_flags & TF_DISCONNECTED) {
INP_WUNLOCK(inp);
error = ENOTCONN;
goto out;
@@ -1334,6 +1335,7 @@ icl_cxgbei_conn_task_setup(struct icl_conn *ic, struct icl_pdu *ip,
struct cxgbei_ddp_state *ddp;
struct ppod_reservation *prsv;
struct inpcb *inp;
+ struct tcpcb *tp;
struct mbufq mq;
uint32_t itt;
int rc = 0;
@@ -1421,8 +1423,9 @@ no_ddp:
* detached already.
*/
inp = sotoinpcb(ic->ic_socket);
+ tp = intotcpcb(inp);
INP_WLOCK(inp);
- if ((inp->inp_flags & INP_DROPPED) != 0) {
+ if ((tp->t_flags & TF_DISCONNECTED) != 0) {
INP_WUNLOCK(inp);
mbufq_drain(&mq);
t4_free_page_pods(prsv);
@@ -1497,6 +1500,7 @@ icl_cxgbei_conn_transfer_setup(struct icl_conn *ic, struct icl_pdu *ip,
struct ppod_reservation *prsv;
struct ctl_sg_entry *sgl, sg_entry;
struct inpcb *inp;
+ struct tcpcb *tp;
struct mbufq mq;
int sg_entries = ctsio->kern_sg_entries;
uint32_t ttt;
@@ -1597,9 +1601,10 @@ no_ddp:
return (ECONNRESET);
}
inp = sotoinpcb(ic->ic_socket);
+ tp = intotcpcb(inp);
INP_WLOCK(inp);
ICL_CONN_UNLOCK(ic);
- if ((inp->inp_flags & INP_DROPPED) != 0) {
+ if ((tp->t_flags & TF_DISCONNECTED) != 0) {
INP_WUNLOCK(inp);
mbufq_drain(&mq);
t4_free_page_pods(prsv);
diff --git a/sys/dev/cxgbe/iw_cxgbe/qp.c b/sys/dev/cxgbe/iw_cxgbe/qp.c
index cbf4bae00a60..372fc5418b91 100644
--- a/sys/dev/cxgbe/iw_cxgbe/qp.c
+++ b/sys/dev/cxgbe/iw_cxgbe/qp.c
@@ -64,7 +64,7 @@ struct cpl_set_tcb_rpl;
#include "iw_cxgbe.h"
#include "user.h"
-static int creds(struct toepcb *toep, struct inpcb *inp, size_t wrsize);
+static int creds(struct toepcb *toep, struct tcpcb *tp, size_t wrsize);
static int max_fr_immd = T4_MAX_FR_IMMD;//SYSCTL parameter later...
static int alloc_ird(struct c4iw_dev *dev, u32 ird)
@@ -1149,7 +1149,7 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
term->ecode = qhp->attr.ecode;
} else
build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
- ret = creds(toep, inp, sizeof(*wqe));
+ ret = creds(toep, tp, sizeof(*wqe));
if (ret) {
free_wrqe(wr);
return;
@@ -1253,8 +1253,7 @@ rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, struct c4iw_ep *ep)
int ret;
struct wrqe *wr;
struct socket *so = ep->com.so;
- struct inpcb *inp = sotoinpcb(so);
- struct tcpcb *tp = intotcpcb(inp);
+ struct tcpcb *tp = intotcpcb(sotoinpcb(so));
struct toepcb *toep = tp->t_toe;
KASSERT(rhp == qhp->rhp && ep == qhp->ep, ("%s: EDOOFUS", __func__));
@@ -1277,7 +1276,7 @@ rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, struct c4iw_ep *ep)
c4iw_init_wr_wait(&ep->com.wr_wait);
- ret = creds(toep, inp, sizeof(*wqe));
+ ret = creds(toep, tp, sizeof(*wqe));
if (ret) {
free_wrqe(wr);
return ret;
@@ -1315,14 +1314,14 @@ static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
}
static int
-creds(struct toepcb *toep, struct inpcb *inp, size_t wrsize)
+creds(struct toepcb *toep, struct tcpcb *tp, size_t wrsize)
{
struct ofld_tx_sdesc *txsd;
CTR3(KTR_IW_CXGBE, "%s:creB %p %u", __func__, toep , wrsize);
- INP_WLOCK(inp);
- if ((inp->inp_flags & INP_DROPPED) != 0) {
- INP_WUNLOCK(inp);
+ INP_WLOCK(tptoinpcb(tp));
+ if (tp->t_flags & TF_DISCONNECTED) {
+ INP_WUNLOCK(tptoinpcb(tp));
return (EINVAL);
}
txsd = &toep->txsd[toep->txsd_pidx];
@@ -1336,7 +1335,7 @@ creds(struct toepcb *toep, struct inpcb *inp, size_t wrsize)
if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
toep->txsd_pidx = 0;
toep->txsd_avail--;
- INP_WUNLOCK(inp);
+ INP_WUNLOCK(tptoinpcb(tp));
CTR5(KTR_IW_CXGBE, "%s:creE %p %u %u %u", __func__, toep ,
txsd->tx_credits, toep->tx_credits, toep->txsd_pidx);
return (0);
@@ -1351,8 +1350,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
struct c4iw_rdev *rdev = &qhp->rhp->rdev;
struct adapter *sc = rdev->adap;
struct socket *so = ep->com.so;
- struct inpcb *inp = sotoinpcb(so);
- struct tcpcb *tp = intotcpcb(inp);
+ struct tcpcb *tp = intotcpcb(sotoinpcb(so));
struct toepcb *toep = tp->t_toe;
CTR5(KTR_IW_CXGBE, "%s qhp %p qid 0x%x ep %p tid %u", __func__, qhp,
@@ -1416,7 +1414,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
c4iw_init_wr_wait(&ep->com.wr_wait);
- ret = creds(toep, inp, sizeof(*wqe));
+ ret = creds(toep, tp, sizeof(*wqe));
if (ret) {
free_wrqe(wr);
free_ird(rhp, qhp->attr.max_ird);
diff --git a/sys/dev/cxgbe/nvmf/nvmf_che.c b/sys/dev/cxgbe/nvmf/nvmf_che.c
index 5c2174b8a40b..afdfc2f1b758 100644
--- a/sys/dev/cxgbe/nvmf/nvmf_che.c
+++ b/sys/dev/cxgbe/nvmf/nvmf_che.c
@@ -555,6 +555,7 @@ che_write_adapter_mem(struct nvmf_che_qpair *qp, uint32_t addr, uint32_t len,
struct toepcb *toep = qp->toep;
struct socket *so = qp->so;
struct inpcb *inp = sotoinpcb(so);
+ struct tcpcb *tp = intotcpcb(inp);
struct mbufq mq;
int error;
@@ -568,7 +569,7 @@ che_write_adapter_mem(struct nvmf_che_qpair *qp, uint32_t addr, uint32_t len,
goto error;
INP_WLOCK(inp);
- if ((inp->inp_flags & INP_DROPPED) != 0) {
+ if ((tp->t_flags & TF_DISCONNECTED) != 0) {
INP_WUNLOCK(inp);
error = ECONNRESET;
goto error;
@@ -862,12 +863,13 @@ nvmf_che_write_pdu(struct nvmf_che_qpair *qp, struct mbuf *m)
struct epoch_tracker et;
struct socket *so = qp->so;
struct inpcb *inp = sotoinpcb(so);
+ struct tcpcb *tp = intotcpcb(inp);
struct toepcb *toep = qp->toep;
CURVNET_SET(so->so_vnet);
NET_EPOCH_ENTER(et);
INP_WLOCK(inp);
- if (__predict_false(inp->inp_flags & INP_DROPPED) ||
+ if (__predict_false(tp->t_flags & TF_DISCONNECTED) ||
__predict_false((toep->flags & TPF_ATTACHED) == 0)) {
m_freem(m);
} else {
@@ -2052,10 +2054,11 @@ do_nvmt_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
("%s: payload length mismatch", __func__));
inp = toep->inp;
+ tp = intotcpcb(inp);
INP_WLOCK(inp);
- if (inp->inp_flags & INP_DROPPED) {
- CTR(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
- __func__, tid, len, inp->inp_flags);
+ if (tp->t_flags & TF_DISCONNECTED) {
+ CTR(KTR_CXGBE, "%s: tid %u, rx (%d bytes), t_flags 0x%x",
+ __func__, tid, len, tp->t_flags);
INP_WUNLOCK(inp);
m_freem(m);
return (0);
@@ -2070,7 +2073,6 @@ do_nvmt_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
mbufq_enqueue(&qp->rx_data, m);
SOCKBUF_UNLOCK(&so->so_rcv);
- tp = intotcpcb(inp);
tp->t_rcvtime = ticks;
#ifdef VERBOSE_TRACES
@@ -2092,6 +2094,7 @@ do_nvmt_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
struct nvmf_che_qpair *qp = toep->ulpcb;
struct socket *so = qp->so;
struct inpcb *inp = toep->inp;
+ struct tcpcb *tp = intotcpcb(inp);
u_int hlen __diagused;
bool empty;
@@ -2107,9 +2110,9 @@ do_nvmt_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
("%s: payload length mismatch", __func__));
INP_WLOCK(inp);
- if (inp->inp_flags & INP_DROPPED) {
- CTR(KTR_CXGBE, "%s: tid %u, rx (hlen %u), inp_flags 0x%x",
- __func__, tid, hlen, inp->inp_flags);
+ if (tp->t_flags & TF_DISCONNECTED) {
+ CTR(KTR_CXGBE, "%s: tid %u, rx (hlen %u), t_flags 0x%x",
+ __func__, tid, hlen, tp->t_flags);
INP_WUNLOCK(inp);
m_freem(m);
return (0);
@@ -2505,7 +2508,7 @@ che_allocate_qpair(bool controller, const nvlist_t *nvl)
inp = sotoinpcb(so);
INP_WLOCK(inp);
tp = intotcpcb(inp);
- if (inp->inp_flags & INP_DROPPED) {
+ if (tp->t_flags & TF_DISCONNECTED) {
INP_WUNLOCK(inp);
free(qp->fl_cid_set, M_NVMF_CHE);
free(qp->fl_cids, M_NVMF_CHE);
diff --git a/sys/dev/cxgbe/tom/t4_connect.c b/sys/dev/cxgbe/tom/t4_connect.c
index c236ee060bc2..e5f6053e2cb6 100644
--- a/sys/dev/cxgbe/tom/t4_connect.c
+++ b/sys/dev/cxgbe/tom/t4_connect.c
@@ -78,6 +78,7 @@ do_act_establish(struct sge_iq *iq, const struct rss_header *rss,
u_int atid = G_TID_TID(ntohl(cpl->tos_atid));
struct toepcb *toep = lookup_atid(sc, atid);
struct inpcb *inp = toep->inp;
+ struct tcpcb *tp = intotcpcb(inp);
KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
KASSERT(toep->tid == atid, ("%s: toep tid/atid mismatch", __func__));
@@ -95,7 +96,7 @@ do_act_establish(struct sge_iq *iq, const struct rss_header *rss,
toep->ctrlq = &sc->sge.ctrlq[toep->params.ctrlq_idx];
}
- if (inp->inp_flags & INP_DROPPED) {
+ if (tp->t_flags & TF_DISCONNECTED) {
/* socket closed by the kernel before hw told us it connected */
diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c
index 7e1c497240c2..6e34d5f54897 100644
--- a/sys/dev/cxgbe/tom/t4_cpl_io.c
+++ b/sys/dev/cxgbe/tom/t4_cpl_io.c
@@ -245,13 +245,13 @@ send_reset(struct adapter *sc, struct toepcb *toep, uint32_t snd_nxt)
struct cpl_abort_req *req;
int tid = toep->tid;
struct inpcb *inp = toep->inp;
- struct tcpcb *tp = intotcpcb(inp); /* don't use if INP_DROPPED */
+ struct tcpcb *tp = intotcpcb(inp);
INP_WLOCK_ASSERT(inp);
CTR6(KTR_CXGBE, "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x%s",
__func__, toep->tid,
- inp->inp_flags & INP_DROPPED ? "inp dropped" :
+ tp->t_flags & TF_DISCONNECTED ? "TCP disconnected" :
tcpstates[tp->t_state],
toep->flags, inp->inp_flags,
toep->flags & TPF_ABORT_SHUTDOWN ?
@@ -273,7 +273,7 @@ send_reset(struct adapter *sc, struct toepcb *toep, uint32_t snd_nxt)
req = wrtod(wr);
INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, tid);
- if (inp->inp_flags & INP_DROPPED)
+ if (tp->t_flags & TF_DISCONNECTED)
req->rsvd0 = htobe32(snd_nxt);
else
req->rsvd0 = htobe32(tp->snd_nxt);
@@ -284,7 +284,7 @@ send_reset(struct adapter *sc, struct toepcb *toep, uint32_t snd_nxt)
* XXX: What's the correct way to tell that the inp hasn't been detached
* from its socket? Should I even be flushing the snd buffer here?
*/
- if ((inp->inp_flags & INP_DROPPED) == 0) {
+ if ((tp->t_flags & TF_DISCONNECTED) == 0) {
struct socket *so = inp->inp_socket;
if (so != NULL) /* because I'm not sure. See comment above */
@@ -1588,8 +1588,8 @@ t4_tod_output(struct toedev *tod, struct tcpcb *tp)
struct toepcb *toep = tp->t_toe;
INP_WLOCK_ASSERT(inp);
- KASSERT((inp->inp_flags & INP_DROPPED) == 0,
- ("%s: inp %p dropped.", __func__, inp));
+ KASSERT((tp->t_flags & TF_DISCONNECTED) == 0,
+ ("%s: tcpcb %p disconnected", __func__, tp));
KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
t4_push_data(sc, toep, 0);
@@ -1607,8 +1607,8 @@ t4_send_fin(struct toedev *tod, struct tcpcb *tp)
struct toepcb *toep = tp->t_toe;
INP_WLOCK_ASSERT(inp);
- KASSERT((inp->inp_flags & INP_DROPPED) == 0,
- ("%s: inp %p dropped.", __func__, inp));
+ KASSERT((tp->t_flags & TF_DISCONNECTED) == 0,
+ ("%s: tcpcb %p disconnected", __func__, tp));
KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
toep->flags |= TPF_SEND_FIN;
@@ -1628,8 +1628,8 @@ t4_send_rst(struct toedev *tod, struct tcpcb *tp)
struct toepcb *toep = tp->t_toe;
INP_WLOCK_ASSERT(inp);
- KASSERT((inp->inp_flags & INP_DROPPED) == 0,
- ("%s: inp %p dropped.", __func__, inp));
+ KASSERT((tp->t_flags & TF_DISCONNECTED) == 0,
+ ("%s: tcpcb %p disconnected", __func__, tp));
KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
/* hmmmm */
@@ -1921,7 +1921,7 @@ do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
}
toep->flags |= TPF_ABORT_SHUTDOWN;
- if ((inp->inp_flags & INP_DROPPED) == 0) {
+ if ((tp->t_flags & TF_DISCONNECTED) == 0) {
struct socket *so = inp->inp_socket;
if (so != NULL)
@@ -2010,17 +2010,16 @@ do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
m_adj(m, sizeof(*cpl));
len = m->m_pkthdr.len;
+ tp = intotcpcb(inp);
INP_WLOCK(inp);
- if (inp->inp_flags & INP_DROPPED) {
- CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
- __func__, tid, len, inp->inp_flags);
+ if (tp->t_flags & TF_DISCONNECTED) {
+ CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), t_flags 0x%x",
+ __func__, tid, len, tp->t_flags);
INP_WUNLOCK(inp);
m_freem(m);
return (0);
}
- tp = intotcpcb(inp);
-
if (__predict_false(ulp_mode(toep) == ULP_MODE_TLS &&
toep->flags & TPF_TLS_RECEIVE)) {
/* Received "raw" data on a TLS socket. */
@@ -2170,6 +2169,7 @@ do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
}
inp = toep->inp;
+ tp = intotcpcb(inp);
KASSERT(opcode == CPL_FW4_ACK,
("%s: unexpected opcode 0x%x", __func__, opcode));
@@ -2183,10 +2183,8 @@ do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
return (0);
}
- KASSERT((inp->inp_flags & INP_DROPPED) == 0,
- ("%s: inp_flags 0x%x", __func__, inp->inp_flags));
-
- tp = intotcpcb(inp);
+ KASSERT((tp->t_flags & TF_DISCONNECTED) == 0,
+ ("%s: t_flags 0x%x", __func__, tp->t_flags));
if (cpl->flags & CPL_FW4_ACK_FLAGS_SEQVAL) {
tcp_seq snd_una = be32toh(cpl->snd_una);
@@ -2627,8 +2625,9 @@ sendanother:
/* Inlined tcp_usr_send(). */
inp = toep->inp;
+ tp = intotcpcb(inp);
INP_WLOCK(inp);
- if (inp->inp_flags & INP_DROPPED) {
+ if (tp->t_flags & TF_DISCONNECTED) {
INP_WUNLOCK(inp);
SOCK_IO_SEND_UNLOCK(so);
error = ECONNRESET;
@@ -2642,8 +2641,7 @@ sendanother:
sbappendstream(sb, m, 0);
m = NULL;
- if (!(inp->inp_flags & INP_DROPPED)) {
- tp = intotcpcb(inp);
+ if (!(tp->t_flags & TF_DISCONNECTED)) {
if (moretocome)
tp->t_flags |= TF_MORETOCOME;
error = tcp_output(tp);
diff --git a/sys/dev/cxgbe/tom/t4_ddp.c b/sys/dev/cxgbe/tom/t4_ddp.c
index 35fb1061d867..9d422c2b793e 100644
--- a/sys/dev/cxgbe/tom/t4_ddp.c
+++ b/sys/dev/cxgbe/tom/t4_ddp.c
@@ -641,8 +641,8 @@ handle_ddp_data_aio(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt,
uint32_t report = be32toh(ddp_report);
unsigned int db_idx;
struct inpcb *inp = toep->inp;
+ struct tcpcb *tp = intotcpcb(inp);
struct ddp_buffer *db;
- struct tcpcb *tp;
struct socket *so;
struct sockbuf *sb;
struct kaiocb *job;
@@ -664,13 +664,13 @@ handle_ddp_data_aio(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt,
db = &toep->ddp.db[db_idx];
job = db->job;
- if (__predict_false(inp->inp_flags & INP_DROPPED)) {
+ if (__predict_false(tp->t_flags & TF_DISCONNECTED)) {
/*
* This can happen due to an administrative tcpdrop(8).
* Just fail the request with ECONNRESET.
*/
- CTR5(KTR_CXGBE, "%s: tid %u, seq 0x%x, len %d, inp_flags 0x%x",
- __func__, toep->tid, be32toh(rcv_nxt), len, inp->inp_flags);
+ CTR5(KTR_CXGBE, "%s: tid %u, seq 0x%x, len %d, t_flags 0x%x",
+ __func__, toep->tid, be32toh(rcv_nxt), len, tp->t_flags);
if (aio_clear_cancel_function(job))
ddp_complete_one(job, ECONNRESET);
goto completed;
@@ -859,7 +859,7 @@ handle_ddp_data_rcvbuf(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt,
{
uint32_t report = be32toh(ddp_report);
struct inpcb *inp = toep->inp;
- struct tcpcb *tp;
+ struct tcpcb *tp = intotcpcb(inp);
struct socket *so;
struct sockbuf *sb;
struct ddp_buffer *db;
@@ -881,20 +881,18 @@ handle_ddp_data_rcvbuf(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt,
toep->ddp.active_id, toep->tid));
db = &toep->ddp.db[db_idx];
- if (__predict_false(inp->inp_flags & INP_DROPPED)) {
+ if (__predict_false(tp->t_flags & TF_DISCONNECTED)) {
/*
* This can happen due to an administrative tcpdrop(8).
* Just ignore the received data.
*/
- CTR5(KTR_CXGBE, "%s: tid %u, seq 0x%x, len %d, inp_flags 0x%x",
- __func__, toep->tid, be32toh(rcv_nxt), len, inp->inp_flags);
+ CTR5(KTR_CXGBE, "%s: tid %u, seq 0x%x, len %d, t_flags 0x%x",
+ __func__, toep->tid, be32toh(rcv_nxt), len, tp->t_flags);
if (invalidated)
complete_ddp_buffer(toep, db, db_idx);
goto out;
}
- tp = intotcpcb(inp);
-
/*
* For RX_DDP_COMPLETE, len will be zero and rcv_nxt is the
* sequence number of the next byte to receive. The length of
diff --git a/sys/dev/cxgbe/tom/t4_listen.c b/sys/dev/cxgbe/tom/t4_listen.c
index b879f6883f25..359267b7db90 100644
--- a/sys/dev/cxgbe/tom/t4_listen.c
+++ b/sys/dev/cxgbe/tom/t4_listen.c
@@ -886,6 +886,7 @@ do_pass_open_rpl(struct sge_iq *iq, const struct rss_header *rss,
unsigned int status = cpl->status;
struct listen_ctx *lctx = lookup_stid(sc, stid);
struct inpcb *inp = lctx->inp;
+ struct tcpcb *tp = intotcpcb(inp);
#ifdef INVARIANTS
unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif
@@ -911,13 +912,13 @@ do_pass_open_rpl(struct sge_iq *iq, const struct rss_header *rss,
* If the inp has been dropped (listening socket closed) then
* listen_stop must have run and taken the inp out of the hash.
*/
- if (inp->inp_flags & INP_DROPPED) {
+ if (tp->t_flags & TF_DISCONNECTED) {
KASSERT(listen_hash_del(sc, inp) == NULL,
("%s: inp %p still in listen hash", __func__, inp));
}
#endif
- if (inp->inp_flags & INP_DROPPED && status != CPL_ERR_NONE) {
+ if (tp->t_flags & TF_DISCONNECTED && status != CPL_ERR_NONE) {
if (release_lctx(sc, lctx) != NULL)
INP_WUNLOCK(inp);
return (status);
@@ -928,7 +929,7 @@ do_pass_open_rpl(struct sge_iq *iq, const struct rss_header *rss,
* it has started the hardware listener. Stop it; the lctx will be
* released in do_close_server_rpl.
*/
- if (inp->inp_flags & INP_DROPPED) {
+ if (tp->t_flags & TF_DISCONNECTED) {
destroy_server(sc, lctx);
INP_WUNLOCK(inp);
return (status);
@@ -1336,6 +1337,7 @@ do_pass_accept_req(struct sge_iq *iq, const struct rss_header *rss,
unsigned int tid = GET_TID(cpl);
struct listen_ctx *lctx = lookup_stid(sc, stid);
struct inpcb *inp;
+ struct tcpcb *tp;
struct socket *so;
struct in_conninfo inc;
struct tcphdr th;
@@ -1477,10 +1479,11 @@ found:
}
inp = lctx->inp; /* listening socket, not owned by TOE */
+ tp = intotcpcb(inp);
INP_RLOCK(inp);
/* Don't offload if the listening socket has closed */
- if (__predict_false(inp->inp_flags & INP_DROPPED)) {
+ if (__predict_false(tp->t_flags & TF_DISCONNECTED)) {
INP_RUNLOCK(inp);
NET_EPOCH_EXIT(et);
REJECT_PASS_ACCEPT_REQ(false);
@@ -1622,6 +1625,7 @@ do_pass_establish(struct sge_iq *iq, const struct rss_header *rss,
struct synq_entry *synqe = lookup_tid(sc, tid);
struct listen_ctx *lctx = synqe->lctx;
struct inpcb *inp = lctx->inp, *new_inp;
+ struct tcpcb *tp = intotcpcb(inp);
struct socket *so;
struct tcphdr th;
struct tcpopt to;
@@ -1653,7 +1657,7 @@ do_pass_establish(struct sge_iq *iq, const struct rss_header *rss,
KASSERT(vi->adapter == sc,
("%s: vi %p, sc %p mismatch", __func__, vi, sc));
- if (__predict_false(inp->inp_flags & INP_DROPPED)) {
+ if (__predict_false(tp->t_flags & TF_DISCONNECTED)) {
reset:
send_abort_rpl_synqe(TOEDEV(ifp), synqe, CPL_ABORT_SEND_RST);
INP_WUNLOCK(inp);
diff --git a/sys/dev/cxgbe/tom/t4_tls.c b/sys/dev/cxgbe/tom/t4_tls.c
index bbcc1c88c3db..0616279ba15e 100644
--- a/sys/dev/cxgbe/tom/t4_tls.c
+++ b/sys/dev/cxgbe/tom/t4_tls.c
@@ -762,7 +762,7 @@ do_tls_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
unsigned int tid = GET_TID(cpl);
struct toepcb *toep = lookup_tid(sc, tid);
struct inpcb *inp = toep->inp;
- struct tcpcb *tp;
+ struct tcpcb *tp = intotcpcb(inp);
int len;
/* XXX: Should this match do_rx_data instead? */
@@ -781,9 +781,9 @@ do_tls_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
("%s: payload length mismatch", __func__));
INP_WLOCK(inp);
- if (inp->inp_flags & INP_DROPPED) {
- CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
- __func__, tid, len, inp->inp_flags);
+ if (tp->t_flags & TF_DISCONNECTED) {
+ CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), t_flags 0x%x",
+ __func__, tid, len, tp->t_flags);
INP_WUNLOCK(inp);
m_freem(m);
return (0);
@@ -803,7 +803,6 @@ do_tls_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
#endif
}
- tp = intotcpcb(inp);
tp->t_rcvtime = ticks;
#ifdef VERBOSE_TRACES
@@ -824,7 +823,7 @@ do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
unsigned int tid = GET_TID(cpl);
struct toepcb *toep = lookup_tid(sc, tid);
struct inpcb *inp = toep->inp;
- struct tcpcb *tp;
+ struct tcpcb *tp = intotcpcb(inp);
struct socket *so;
struct sockbuf *sb;
struct mbuf *tls_data;
@@ -851,9 +850,9 @@ do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
("%s: payload length mismatch", __func__));
INP_WLOCK(inp);
- if (inp->inp_flags & INP_DROPPED) {
- CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
- __func__, tid, len, inp->inp_flags);
+ if (tp->t_flags & TF_DISCONNECTED) {
+ CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), t_flags 0x%x",
+ __func__, tid, len, tp->t_flags);
INP_WUNLOCK(inp);
m_freem(m);
return (0);
@@ -862,7 +861,6 @@ do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
pdu_length = G_CPL_RX_TLS_CMP_PDULENGTH(be32toh(cpl->pdulength_length));
so = inp_inpcbtosocket(inp);
- tp = intotcpcb(inp);
#ifdef VERBOSE_TRACES
CTR6(KTR_CXGBE, "%s: tid %u PDU len %d len %d seq %u, rcv_nxt %u",
diff --git a/sys/dev/cxgbe/tom/t4_tom.c b/sys/dev/cxgbe/tom/t4_tom.c
index 8dfffd465345..950608053be7 100644
--- a/sys/dev/cxgbe/tom/t4_tom.c
+++ b/sys/dev/cxgbe/tom/t4_tom.c
@@ -1830,7 +1830,7 @@ live_tid_failure_cleanup(struct adapter *sc, struct toepcb *toep, u_int status)
INP_WLOCK(inp);
tp = intotcpcb(inp);
toep->flags |= TPF_ABORT_SHUTDOWN;
- if ((inp->inp_flags & INP_DROPPED) == 0) {
+ if ((tp->t_flags & TF_DISCONNECTED) == 0) {
struct socket *so = inp->inp_socket;
if (so != NULL)
@@ -2283,8 +2283,8 @@ find_offload_adapter_cb(struct adapter *sc, void *arg)
struct find_offload_adapter_data *fa = arg;
struct socket *so = fa->so;
struct tom_data *td = sc->tom_softc;
- struct tcpcb *tp;
- struct inpcb *inp;
+ struct inpcb *inp = sotoinpcb(so);
+ struct tcpcb *tp = intotcpcb(inp);
/* Non-TCP were filtered out earlier. */
MPASS(so->so_proto->pr_protocol == IPPROTO_TCP);
@@ -2295,10 +2295,8 @@ find_offload_adapter_cb(struct adapter *sc, void *arg)
if (td == NULL)
return; /* TOE not enabled on this adapter. */
- inp = sotoinpcb(so);
INP_WLOCK(inp);
- if ((inp->inp_flags & INP_DROPPED) == 0) {
- tp = intotcpcb(inp);
+ if ((tp->t_flags & TF_DISCONNECTED) == 0) {
if (tp->t_flags & TF_TOE && tp->tod == &td->tod)
fa->sc = sc; /* Found. */
}
diff --git a/sys/dev/dpaa2/dpaa2_buf.c b/sys/dev/dpaa2/dpaa2_buf.c
index 8505b074fe4f..228e4448210d 100644
--- a/sys/dev/dpaa2/dpaa2_buf.c
+++ b/sys/dev/dpaa2/dpaa2_buf.c
@@ -42,6 +42,7 @@
#include "dpaa2_swp.h"
#include "dpaa2_swp_if.h"
#include "dpaa2_ni.h"
+#include "dpaa2_frame.h"
MALLOC_DEFINE(M_DPAA2_RXB, "dpaa2_rxb", "DPAA2 DMA-mapped buffer (Rx)");
@@ -129,7 +130,7 @@ dpaa2_buf_seed_rxb(device_t dev, struct dpaa2_buf *buf, int size,
struct mtx *dma_mtx)
{
struct dpaa2_ni_softc *sc = device_get_softc(dev);
- struct dpaa2_fa *fa;
+ struct dpaa2_swa *swa;
bool map_created = false;
bool mbuf_alloc = false;
int error;
@@ -179,9 +180,9 @@ dpaa2_buf_seed_rxb(device_t dev, struct dpaa2_buf *buf, int size,
buf->vaddr = buf->m->m_data;
/* Populate frame annotation for future use */
- fa = (struct dpaa2_fa *)buf->vaddr;
- fa->magic = DPAA2_MAGIC;
- fa->buf = buf;
+ swa = (struct dpaa2_swa *)buf->vaddr;
+ swa->magic = DPAA2_MAGIC;
+ swa->buf = buf;
bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_PREREAD);
diff --git a/sys/dev/dpaa2/dpaa2_buf.h b/sys/dev/dpaa2/dpaa2_buf.h
index 853a4fa78d3a..16ea7e1905ac 100644
--- a/sys/dev/dpaa2/dpaa2_buf.h
+++ b/sys/dev/dpaa2/dpaa2_buf.h
@@ -33,6 +33,8 @@
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
+#include <sys/systm.h>
+#include <sys/mbuf.h>
#include <machine/bus.h>
diff --git a/sys/dev/dpaa2/dpaa2_frame.c b/sys/dev/dpaa2/dpaa2_frame.c
new file mode 100644
index 000000000000..4a155f7cb32f
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_frame.c
@@ -0,0 +1,165 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2026 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+
+#include <sys/param.h>
+#include <sys/errno.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/vmparam.h>
+
+#include "dpaa2_types.h"
+#include "dpaa2_frame.h"
+#include "dpaa2_buf.h"
+#include "dpaa2_swp.h"
+
+/**
+ * @brief Build a DPAA2 frame descriptor.
+ */
+int
+dpaa2_fd_build(device_t dev, const uint16_t tx_data_off, struct dpaa2_buf *buf,
+ bus_dma_segment_t *segs, const int nsegs, struct dpaa2_fd *fd)
+{
+ struct dpaa2_buf *sgt = buf->sgt;
+ struct dpaa2_sg_entry *sge;
+ struct dpaa2_swa *swa;
+ int i, error;
+
+ if (buf == NULL || segs == NULL || nsegs == 0 || fd == NULL)
+ return (EINVAL);
+
+ KASSERT(nsegs <= DPAA2_TX_SEGLIMIT, ("%s: too many segments", __func__));
+ KASSERT(buf->opt != NULL, ("%s: no Tx ring?", __func__));
+ KASSERT(sgt != NULL, ("%s: no S/G table?", __func__));
+ KASSERT(sgt->vaddr != NULL, ("%s: no S/G vaddr?", __func__));
+
+ memset(fd, 0, sizeof(*fd));
+
+ /* Populate and map S/G table */
+ if (__predict_true(nsegs <= DPAA2_TX_SEGLIMIT)) {
+ sge = (struct dpaa2_sg_entry *)sgt->vaddr + tx_data_off;
+ for (i = 0; i < nsegs; i++) {
+ sge[i].addr = (uint64_t)segs[i].ds_addr;
+ sge[i].len = (uint32_t)segs[i].ds_len;
+ sge[i].offset_fmt = 0u;
+ }
+ sge[i-1].offset_fmt |= 0x8000u; /* set final entry flag */
+
+ KASSERT(sgt->paddr == 0, ("%s: paddr(%#jx) != 0", __func__,
+ sgt->paddr));
+
+ error = bus_dmamap_load(sgt->dmat, sgt->dmap, sgt->vaddr,
+ DPAA2_TX_SGT_SZ, dpaa2_dmamap_oneseg_cb, &sgt->paddr,
+ BUS_DMA_NOWAIT);
+ if (__predict_false(error != 0)) {
+ device_printf(dev, "%s: bus_dmamap_load() failed: "
+ "error=%d\n", __func__, error);
+ return (error);
+ }
+
+ buf->paddr = sgt->paddr;
+ buf->vaddr = sgt->vaddr;
+ } else {
+ return (EINVAL);
+ }
+
+ swa = (struct dpaa2_swa *)sgt->vaddr;
+ swa->magic = DPAA2_MAGIC;
+ swa->buf = buf;
+
+ fd->addr = buf->paddr;
+ fd->data_length = (uint32_t)buf->m->m_pkthdr.len;
+ fd->bpid_ivp_bmt = 0;
+ fd->offset_fmt_sl = 0x2000u | tx_data_off;
+ fd->ctrl = (0x4u & DPAA2_FD_PTAC_MASK) << DPAA2_FD_PTAC_SHIFT;
+
+ return (0);
+}
+
+int
+dpaa2_fd_err(struct dpaa2_fd *fd)
+{
+ return ((fd->ctrl >> DPAA2_FD_ERR_SHIFT) & DPAA2_FD_ERR_MASK);
+}
+
+uint32_t
+dpaa2_fd_data_len(struct dpaa2_fd *fd)
+{
+ if (dpaa2_fd_short_len(fd)) {
+ return (fd->data_length & DPAA2_FD_LEN_MASK);
+ }
+ return (fd->data_length);
+}
+
+int
+dpaa2_fd_format(struct dpaa2_fd *fd)
+{
+ return ((enum dpaa2_fd_format)((fd->offset_fmt_sl >>
+ DPAA2_FD_FMT_SHIFT) & DPAA2_FD_FMT_MASK));
+}
+
+bool
+dpaa2_fd_short_len(struct dpaa2_fd *fd)
+{
+ return (((fd->offset_fmt_sl >> DPAA2_FD_SL_SHIFT)
+ & DPAA2_FD_SL_MASK) == 1);
+}
+
+int
+dpaa2_fd_offset(struct dpaa2_fd *fd)
+{
+ return (fd->offset_fmt_sl & DPAA2_FD_OFFSET_MASK);
+}
+
+int
+dpaa2_fa_get_swa(struct dpaa2_fd *fd, struct dpaa2_swa **swa)
+{
+ int rc;
+
+ if (fd == NULL || swa == NULL)
+ return (EINVAL);
+
+ if (((fd->ctrl >> DPAA2_FD_PTAC_SHIFT) & DPAA2_FD_PTAC_MASK) >= 0x4u) {
+ *swa = (struct dpaa2_swa *)PHYS_TO_DMAP((bus_addr_t)fd->addr);
+ rc = 0;
+ } else {
+ *swa = NULL;
+ rc = ENOENT;
+ }
+
+ return (rc);
+}
+
+int
+dpaa2_fa_get_hwa(struct dpaa2_fd *fd, struct dpaa2_hwa **hwa)
+{
+ /* TODO: To be implemented next. */
+ return (ENOENT);
+}
diff --git a/sys/dev/dpaa2/dpaa2_frame.h b/sys/dev/dpaa2/dpaa2_frame.h
new file mode 100644
index 000000000000..0b2a5a7d8e74
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_frame.h
@@ -0,0 +1,174 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2026 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DPAA2_FRAME_H
+#define _DPAA2_FRAME_H
+
+#include <sys/types.h>
+#include <sys/bus.h>
+#include <sys/kassert.h>
+
+#include "dpaa2_types.h"
+#include "dpaa2_buf.h"
+
+/*
+ * Helper routines for the DPAA2 frames (e.g. descriptors, software/hardware
+ * annotations, etc.).
+ */
+
+/*
+ * DPAA2 frame descriptor size, field offsets and masks.
+ *
+ * See 3.1.1 Frame descriptor format,
+ * 4.2.1.2.2 Structure of Frame Descriptors (FDs),
+ * LX2160A DPAA2 Low-Level Hardware Reference Manual, Rev. 0, 06/2020
+ */
+#define DPAA2_FD_SIZE 32u
+#define DPAA2_FD_FMT_MASK (0x3u)
+#define DPAA2_FD_FMT_SHIFT (12)
+#define DPAA2_FD_ERR_MASK (0xFFu)
+#define DPAA2_FD_ERR_SHIFT (0)
+#define DPAA2_FD_SL_MASK (0x1u)
+#define DPAA2_FD_SL_SHIFT (14)
+#define DPAA2_FD_LEN_MASK (0x3FFFFu)
+#define DPAA2_FD_OFFSET_MASK (0x0FFFu)
+#define DPAA2_FD_PTAC_MASK (0x7u)
+#define DPAA2_FD_PTAC_SHIFT (21)
+
+/*
+ * DPAA2 frame annotation sizes
+ *
+ * NOTE: Accelerator-specific (HWA) annotation length is described in the 64-byte
+ * units by the FD[ASAL] bits and can be as big as 960 bytes. Current
+ * values describe what is actually supported by the DPAA2 drivers.
+ *
+ * See 3.1.1 Frame descriptor format,
+ * LX2160A DPAA2 Low-Level Hardware Reference Manual, Rev. 0
+ */
+#define DPAA2_FA_SIZE 192u /* DPAA2 frame annotation */
+#define DPAA2_FA_SWA_SIZE 64u /* SW frame annotation */
+#define DPAA2_FA_HWA_SIZE 128u /* HW frame annotation */
+#define DPAA2_FA_WRIOP_SIZE 128u /* WRIOP HW annotation */
+
+/**
+ * @brief DPAA2 frame descriptor.
+ *
+ * addr: Memory address of the start of the buffer holding the
+ * frame data or the buffer containing the scatter/gather
+ * list.
+ * data_length: Length of the frame data (in bytes).
+ * bpid_ivp_bmt: Buffer pool ID (14 bit + BMT bit + IVP bit)
+ * offset_fmt_sl: Frame data offset, frame format and short-length fields.
+ * frame_ctx: Frame context. This field allows the sender of a frame
+ * to communicate some out-of-band information to the
+ * receiver of the frame.
+ * ctrl: Control bits (ERR, CBMT, ASAL, PTAC, DROPP, SC, DD).
+ * flow_ctx: Frame flow context. Associates the frame with a flow
+ * structure. QMan may use the FLC field for 3 purposes:
+ * stashing control, order definition point identification,
+ * and enqueue replication control.
+ *
+ * See 3.1.1 Frame descriptor format,
+ * 4.2.1.2.2 Structure of Frame Descriptors (FDs),
+ * LX2160A DPAA2 Low-Level Hardware Reference Manual, Rev. 0, 06/2020
+ */
+struct dpaa2_fd {
+ uint64_t addr;
+ uint32_t data_length;
+ uint16_t bpid_ivp_bmt;
+ uint16_t offset_fmt_sl;
+ uint32_t frame_ctx;
+ uint32_t ctrl;
+ uint64_t flow_ctx;
+} __packed;
+CTASSERT(sizeof(struct dpaa2_fd) == DPAA2_FD_SIZE);
+
+/**
+ * @brief WRIOP hardware frame annotation.
+ *
+ * See 7.34.2 WRIOP hardware frame annotation (FA),
+ * LX2160A DPAA2 Low-Level Hardware Reference Manual, Rev. 0, 06/2020
+ */
+struct dpaa2_hwa_wriop {
+ union {
+ struct {
+ uint64_t fas;
+ uint64_t timestamp;
+ /* XXX-DSL: more to add here... */
+ } __packed;
+ uint8_t raw[128];
+ };
+} __packed;
+CTASSERT(sizeof(struct dpaa2_hwa_wriop) == DPAA2_FA_WRIOP_SIZE);
+
+/**
+ * @brief DPAA2 hardware frame annotation (accelerator-specific annotation).
+ *
+ * See 3.4.1.2 Accelerator-specific annotation,
+ * LX2160A DPAA2 Low-Level Hardware Reference Manual, Rev. 0, 06/2020
+ */
+struct dpaa2_hwa {
+ union {
+ struct dpaa2_hwa_wriop wriop;
+ };
+} __packed;
+CTASSERT(sizeof(struct dpaa2_hwa) == DPAA2_FA_HWA_SIZE);
+
+/**
+ * @brief DPAA2 software frame annotation (pass-through annotation).
+ *
+ * See 3.4.1.1 Pass-through annotation,
+ * LX2160A DPAA2 Low-Level Hardware Reference Manual, Rev. 0, 06/2020
+ */
+struct dpaa2_swa {
+ union {
+ struct {
+ uint32_t magic;
+ struct dpaa2_buf *buf;
+ };
+ struct {
+ uint8_t pta1[32];
+ uint8_t pta2[32];
+ };
+ uint8_t raw[64];
+ };
+} __packed;
+CTASSERT(sizeof(struct dpaa2_swa) == DPAA2_FA_SWA_SIZE);
+
+int dpaa2_fd_build(device_t, const uint16_t, struct dpaa2_buf *,
+ bus_dma_segment_t *, const int, struct dpaa2_fd *);
+
+int dpaa2_fd_err(struct dpaa2_fd *);
+uint32_t dpaa2_fd_data_len(struct dpaa2_fd *);
+int dpaa2_fd_format(struct dpaa2_fd *);
+bool dpaa2_fd_short_len(struct dpaa2_fd *);
+int dpaa2_fd_offset(struct dpaa2_fd *);
+
+int dpaa2_fa_get_swa(struct dpaa2_fd *, struct dpaa2_swa **);
+int dpaa2_fa_get_hwa(struct dpaa2_fd *, struct dpaa2_hwa **);
+
+#endif /* _DPAA2_FRAME_H */
diff --git a/sys/dev/dpaa2/dpaa2_ni.c b/sys/dev/dpaa2/dpaa2_ni.c
index c72e68b8a62f..5017b5113109 100644
--- a/sys/dev/dpaa2/dpaa2_ni.c
+++ b/sys/dev/dpaa2/dpaa2_ni.c
@@ -96,6 +96,7 @@
#include "dpaa2_ni.h"
#include "dpaa2_channel.h"
#include "dpaa2_buf.h"
+#include "dpaa2_frame.h"
#define BIT(x) (1ul << (x))
#define WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
@@ -156,10 +157,6 @@ MALLOC_DEFINE(M_DPAA2_TXB, "dpaa2_txb", "DPAA2 DMA-mapped buffer (Tx)");
#define DPAA2_RX_BUFRING_SZ (4096u)
#define DPAA2_RXE_BUFRING_SZ (1024u)
#define DPAA2_TXC_BUFRING_SZ (4096u)
-#define DPAA2_TX_SEGLIMIT (16u) /* arbitrary number */
-#define DPAA2_TX_SEG_SZ (PAGE_SIZE)
-#define DPAA2_TX_SEGS_MAXSZ (DPAA2_TX_SEGLIMIT * DPAA2_TX_SEG_SZ)
-#define DPAA2_TX_SGT_SZ (PAGE_SIZE) /* bytes */
/* Size of a buffer to keep a QoS table key configuration. */
#define ETH_QOS_KCFG_BUF_SIZE (PAGE_SIZE)
@@ -186,15 +183,6 @@ MALLOC_DEFINE(M_DPAA2_TXB, "dpaa2_txb", "DPAA2 DMA-mapped buffer (Tx)");
#define DPAA2_NI_TXBUF_IDX_MASK (0xFFu)
#define DPAA2_NI_TXBUF_IDX_SHIFT (49)
-#define DPAA2_NI_FD_FMT_MASK (0x3u)
-#define DPAA2_NI_FD_FMT_SHIFT (12)
-#define DPAA2_NI_FD_ERR_MASK (0xFFu)
-#define DPAA2_NI_FD_ERR_SHIFT (0)
-#define DPAA2_NI_FD_SL_MASK (0x1u)
-#define DPAA2_NI_FD_SL_SHIFT (14)
-#define DPAA2_NI_FD_LEN_MASK (0x3FFFFu)
-#define DPAA2_NI_FD_OFFSET_MASK (0x0FFFu)
-
/* Enables TCAM for Flow Steering and QoS look-ups. */
#define DPNI_OPT_HAS_KEY_MASKING 0x10
@@ -424,15 +412,6 @@ static int dpaa2_ni_set_mac_addr(device_t);
static int dpaa2_ni_set_hash(device_t, uint64_t);
static int dpaa2_ni_set_dist_key(device_t, enum dpaa2_ni_dist_mode, uint64_t);
-/* Frame descriptor routines */
-static int dpaa2_ni_build_fd(struct dpaa2_ni_softc *, struct dpaa2_ni_tx_ring *,
- struct dpaa2_buf *, bus_dma_segment_t *, int, struct dpaa2_fd *);
-static int dpaa2_ni_fd_err(struct dpaa2_fd *);
-static uint32_t dpaa2_ni_fd_data_len(struct dpaa2_fd *);
-static int dpaa2_ni_fd_format(struct dpaa2_fd *);
-static bool dpaa2_ni_fd_short_len(struct dpaa2_fd *);
-static int dpaa2_ni_fd_offset(struct dpaa2_fd *);
-
/* Various subroutines */
static int dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *, uint16_t, uint16_t);
static int dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *, uint8_t *);
@@ -2995,14 +2974,18 @@ dpaa2_ni_tx(struct dpaa2_ni_softc *sc, struct dpaa2_channel *ch,
}
}
- error = dpaa2_ni_build_fd(sc, tx, buf, segs, nsegs, &fd);
+ error = dpaa2_fd_build(dev, sc->tx_data_off, buf, segs, nsegs, &fd);
if (__predict_false(error != 0)) {
device_printf(dev, "%s: failed to build frame descriptor: "
"error=%d\n", __func__, error);
fq->chan->tx_dropped++;
if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
goto err_unload;
- }
+ } else
+ sc->tx_sg_frames++; /* for sysctl(9) */
+
+ bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_PREWRITE);
/* TODO: Enqueue several frames in a single command */
for (int i = 0; i < DPAA2_NI_ENQUEUE_RETRIES; i++) {
@@ -3013,9 +2996,6 @@ dpaa2_ni_tx(struct dpaa2_ni_softc *sc, struct dpaa2_channel *ch,
}
}
- bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_PREWRITE);
- bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_PREWRITE);
-
if (rc != 1) {
fq->chan->tx_dropped++;
if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
@@ -3130,14 +3110,14 @@ dpaa2_ni_consume_frames(struct dpaa2_channel *chan, struct dpaa2_ni_fq **src,
* @brief Receive frames.
*/
static int
-dpaa2_ni_rx(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq, struct dpaa2_fd *fd,
- struct dpaa2_ni_rx_ctx *ctx)
+dpaa2_ni_rx(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq,
+ struct dpaa2_fd *fd, struct dpaa2_ni_rx_ctx *ctx)
{
- bus_addr_t paddr = (bus_addr_t)fd->addr;
- struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
- struct dpaa2_buf *buf = fa->buf;
- struct dpaa2_channel *bch = (struct dpaa2_channel *)buf->opt;
- struct dpaa2_ni_softc *sc = device_get_softc(bch->ni_dev);
+ bus_addr_t paddr;
+ struct dpaa2_swa *swa;
+ struct dpaa2_buf *buf;
+ struct dpaa2_channel *bch;
+ struct dpaa2_ni_softc *sc;
struct dpaa2_bp_softc *bpsc;
struct mbuf *m;
device_t bpdev;
@@ -3145,7 +3125,17 @@ dpaa2_ni_rx(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq, struct dpaa2_fd *f
void *buf_data;
int buf_len, error, released_n = 0;
- KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
+ error = dpaa2_fa_get_swa(fd, &swa);
+ if (__predict_false(error != 0))
+ panic("%s: frame has no software annotation: error=%d",
+ __func__, error);
+
+ paddr = (bus_addr_t)fd->addr;
+ buf = swa->buf;
+ bch = (struct dpaa2_channel *)buf->opt;
+ sc = device_get_softc(bch->ni_dev);
+
+ KASSERT(swa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
/*
* NOTE: Current channel might not be the same as the "buffer" channel
* and it's fine. It must not be NULL though.
@@ -3157,7 +3147,7 @@ dpaa2_ni_rx(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq, struct dpaa2_fd *f
__func__, paddr, buf->paddr);
}
- switch (dpaa2_ni_fd_err(fd)) {
+ switch (dpaa2_fd_err(fd)) {
case 1: /* Enqueue rejected by QMan */
sc->rx_enq_rej_frames++;
break;
@@ -3167,7 +3157,7 @@ dpaa2_ni_rx(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq, struct dpaa2_fd *f
default:
break;
}
- switch (dpaa2_ni_fd_format(fd)) {
+ switch (dpaa2_fd_format(fd)) {
case DPAA2_FD_SINGLE:
sc->rx_single_buf_frames++;
break;
@@ -3183,9 +3173,11 @@ dpaa2_ni_rx(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq, struct dpaa2_fd *f
bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(buf->dmat, buf->dmap);
+
m = buf->m;
- buf_len = dpaa2_ni_fd_data_len(fd);
- buf_data = (uint8_t *)buf->vaddr + dpaa2_ni_fd_offset(fd);
+ buf_len = dpaa2_fd_data_len(fd);
+ buf_data = (uint8_t *)buf->vaddr + dpaa2_fd_offset(fd);
+
/* Prepare buffer to be re-cycled */
buf->m = NULL;
buf->paddr = 0;
@@ -3273,16 +3265,26 @@ static int
dpaa2_ni_rx_err(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq,
struct dpaa2_fd *fd)
{
- bus_addr_t paddr = (bus_addr_t)fd->addr;
- struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
- struct dpaa2_buf *buf = fa->buf;
- struct dpaa2_channel *bch = (struct dpaa2_channel *)buf->opt;
- struct dpaa2_ni_softc *sc = device_get_softc(bch->ni_dev);
+ bus_addr_t paddr;
+ struct dpaa2_swa *swa;
+ struct dpaa2_buf *buf;
+ struct dpaa2_channel *bch;
+ struct dpaa2_ni_softc *sc;
device_t bpdev;
struct dpaa2_bp_softc *bpsc;
int error;
- KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
+ error = dpaa2_fa_get_swa(fd, &swa);
+ if (__predict_false(error != 0))
+ panic("%s: frame has no software annotation: error=%d",
+ __func__, error);
+
+ paddr = (bus_addr_t)fd->addr;
+ buf = swa->buf;
+ bch = (struct dpaa2_channel *)buf->opt;
+ sc = device_get_softc(bch->ni_dev);
+
+ KASSERT(swa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
/*
* NOTE: Current channel might not be the same as the "buffer" channel
* and it's fine. It must not be NULL though.
@@ -3316,14 +3318,26 @@ static int
dpaa2_ni_tx_conf(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq,
struct dpaa2_fd *fd)
{
- bus_addr_t paddr = (bus_addr_t)fd->addr;
- struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
- struct dpaa2_buf *buf = fa->buf;
- struct dpaa2_buf *sgt = buf->sgt;
- struct dpaa2_ni_tx_ring *tx = (struct dpaa2_ni_tx_ring *)buf->opt;
- struct dpaa2_channel *bch = tx->fq->chan;
+ bus_addr_t paddr;
+ struct dpaa2_swa *swa;
+ struct dpaa2_buf *buf;
+ struct dpaa2_buf *sgt;
+ struct dpaa2_ni_tx_ring *tx;
+ struct dpaa2_channel *bch;
+ int error;
- KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
+ error = dpaa2_fa_get_swa(fd, &swa);
+ if (__predict_false(error != 0))
+ panic("%s: frame has no software annotation: error=%d",
+ __func__, error);
+
+ paddr = (bus_addr_t)fd->addr;
+ buf = swa->buf;
+ sgt = buf->sgt;
+ tx = (struct dpaa2_ni_tx_ring *)buf->opt;
+ bch = tx->fq->chan;
+
+ KASSERT(swa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
KASSERT(tx != NULL, ("%s: Tx ring is NULL", __func__));
KASSERT(sgt != NULL, ("%s: S/G table is NULL", __func__));
/*
@@ -3372,102 +3386,6 @@ dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *sc, uint16_t major,
}
/**
- * @brief Build a DPAA2 frame descriptor.
- */
-static int
-dpaa2_ni_build_fd(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
- struct dpaa2_buf *buf, bus_dma_segment_t *segs, int nsegs, struct dpaa2_fd *fd)
-{
- struct dpaa2_buf *sgt = buf->sgt;
- struct dpaa2_sg_entry *sge;
- struct dpaa2_fa *fa;
- int i, error;
-
- KASSERT(nsegs <= DPAA2_TX_SEGLIMIT, ("%s: too many segments", __func__));
- KASSERT(buf->opt != NULL, ("%s: no Tx ring?", __func__));
- KASSERT(sgt != NULL, ("%s: no S/G table?", __func__));
- KASSERT(sgt->vaddr != NULL, ("%s: no S/G vaddr?", __func__));
-
- memset(fd, 0, sizeof(*fd));
-
- /* Populate and map S/G table */
- if (__predict_true(nsegs <= DPAA2_TX_SEGLIMIT)) {
- sge = (struct dpaa2_sg_entry *)sgt->vaddr + sc->tx_data_off;
- for (i = 0; i < nsegs; i++) {
- sge[i].addr = (uint64_t)segs[i].ds_addr;
- sge[i].len = (uint32_t)segs[i].ds_len;
- sge[i].offset_fmt = 0u;
- }
- sge[i-1].offset_fmt |= 0x8000u; /* set final entry flag */
-
- KASSERT(sgt->paddr == 0, ("%s: paddr(%#jx) != 0", __func__,
- sgt->paddr));
-
- error = bus_dmamap_load(sgt->dmat, sgt->dmap, sgt->vaddr,
- DPAA2_TX_SGT_SZ, dpaa2_dmamap_oneseg_cb, &sgt->paddr,
- BUS_DMA_NOWAIT);
- if (__predict_false(error != 0)) {
- device_printf(sc->dev, "%s: bus_dmamap_load() failed: "
- "error=%d\n", __func__, error);
- return (error);
- }
-
- buf->paddr = sgt->paddr;
- buf->vaddr = sgt->vaddr;
- sc->tx_sg_frames++; /* for sysctl(9) */
- } else {
- return (EINVAL);
- }
-
- fa = (struct dpaa2_fa *)sgt->vaddr;
- fa->magic = DPAA2_MAGIC;
- fa->buf = buf;
-
- fd->addr = buf->paddr;
- fd->data_length = (uint32_t)buf->m->m_pkthdr.len;
- fd->bpid_ivp_bmt = 0;
- fd->offset_fmt_sl = 0x2000u | sc->tx_data_off;
- fd->ctrl = 0x00800000u;
-
- return (0);
-}
-
-static int
-dpaa2_ni_fd_err(struct dpaa2_fd *fd)
-{
- return ((fd->ctrl >> DPAA2_NI_FD_ERR_SHIFT) & DPAA2_NI_FD_ERR_MASK);
-}
-
-static uint32_t
-dpaa2_ni_fd_data_len(struct dpaa2_fd *fd)
-{
- if (dpaa2_ni_fd_short_len(fd)) {
- return (fd->data_length & DPAA2_NI_FD_LEN_MASK);
- }
- return (fd->data_length);
-}
-
-static int
-dpaa2_ni_fd_format(struct dpaa2_fd *fd)
-{
- return ((enum dpaa2_fd_format)((fd->offset_fmt_sl >>
- DPAA2_NI_FD_FMT_SHIFT) & DPAA2_NI_FD_FMT_MASK));
-}
-
-static bool
-dpaa2_ni_fd_short_len(struct dpaa2_fd *fd)
-{
- return (((fd->offset_fmt_sl >> DPAA2_NI_FD_SL_SHIFT)
- & DPAA2_NI_FD_SL_MASK) == 1);
-}
-
-static int
-dpaa2_ni_fd_offset(struct dpaa2_fd *fd)
-{
- return (fd->offset_fmt_sl & DPAA2_NI_FD_OFFSET_MASK);
-}
-
-/**
* @brief Collect statistics of the network interface.
*/
static int
diff --git a/sys/dev/dpaa2/dpaa2_ni.h b/sys/dev/dpaa2/dpaa2_ni.h
index 6fb0673fac09..fcd37501ebd0 100644
--- a/sys/dev/dpaa2/dpaa2_ni.h
+++ b/sys/dev/dpaa2/dpaa2_ni.h
@@ -490,8 +490,9 @@ struct dpaa2_ni_softc {
struct dpaa2_channel *channels[DPAA2_MAX_CHANNELS];
struct dpaa2_ni_fq rxe_queue; /* one per DPNI */
+ /* sysctl(9) */
struct dpaa2_atomic buf_num;
- struct dpaa2_atomic buf_free; /* for sysctl(9) only */
+ struct dpaa2_atomic buf_free;
int irq_rid[DPAA2_NI_MSI_COUNT];
struct resource *irq_res;
diff --git a/sys/dev/dpaa2/dpaa2_swp.h b/sys/dev/dpaa2/dpaa2_swp.h
index 1b1383b4241f..20980c6b71b7 100644
--- a/sys/dev/dpaa2/dpaa2_swp.h
+++ b/sys/dev/dpaa2/dpaa2_swp.h
@@ -35,6 +35,7 @@
#include "dpaa2_types.h"
#include "dpaa2_buf.h"
#include "dpaa2_bp.h"
+#include "dpaa2_frame.h"
/*
* DPAA2 QBMan software portal.
@@ -200,10 +201,8 @@
#define DPAA2_EQ_DESC_SIZE 32u /* Enqueue Command Descriptor */
#define DPAA2_FDR_DESC_SIZE 32u /* Descriptor of the FDR */
-#define DPAA2_FD_SIZE 32u /* Frame Descriptor */
#define DPAA2_FDR_SIZE 64u /* Frame Dequeue Response */
#define DPAA2_SCN_SIZE 16u /* State Change Notification */
-#define DPAA2_FA_SIZE 64u /* SW Frame Annotation */
#define DPAA2_SGE_SIZE 16u /* S/G table entry */
#define DPAA2_DQ_SIZE 64u /* Dequeue Response */
#define DPAA2_SWP_CMD_SIZE 64u /* SWP Command */
@@ -285,54 +284,6 @@ struct dpaa2_scn {
CTASSERT(sizeof(struct dpaa2_scn) == DPAA2_SCN_SIZE);
/**
- * @brief DPAA2 frame descriptor.
- *
- * addr: Memory address of the start of the buffer holding the
- * frame data or the buffer containing the scatter/gather
- * list.
- * data_length: Length of the frame data (in bytes).
- * bpid_ivp_bmt: Buffer pool ID (14 bit + BMT bit + IVP bit)
- * offset_fmt_sl: Frame data offset, frame format and short-length fields.
- * frame_ctx: Frame context. This field allows the sender of a frame
- * to communicate some out-of-band information to the
- * receiver of the frame.
- * ctrl: Control bits (ERR, CBMT, ASAL, PTAC, DROPP, SC, DD).
- * flow_ctx: Frame flow context. Associates the frame with a flow
- * structure. QMan may use the FLC field for 3 purposes:
- * stashing control, order definition point identification,
- * and enqueue replication control.
- */
-struct dpaa2_fd {
- uint64_t addr;
- uint32_t data_length;
- uint16_t bpid_ivp_bmt;
- uint16_t offset_fmt_sl;
- uint32_t frame_ctx;
- uint32_t ctrl;
- uint64_t flow_ctx;
-} __packed;
-CTASSERT(sizeof(struct dpaa2_fd) == DPAA2_FD_SIZE);
-
-/**
- * @brief DPAA2 frame annotation.
- */
-struct dpaa2_fa {
- uint32_t magic;
- struct dpaa2_buf *buf;
-#ifdef __notyet__
- union {
- struct { /* Tx frame annotation */
- struct dpaa2_ni_tx_ring *tx;
- };
- struct { /* Rx frame annotation */
- uint64_t _notused;
- };
- };
-#endif
-} __packed;
-CTASSERT(sizeof(struct dpaa2_fa) <= DPAA2_FA_SIZE);
-
-/**
* @brief DPAA2 scatter/gather entry.
*/
struct dpaa2_sg_entry {
diff --git a/sys/dev/dpaa2/dpaa2_types.h b/sys/dev/dpaa2/dpaa2_types.h
index dbfac9ce0a40..dc1c232c09c2 100644
--- a/sys/dev/dpaa2/dpaa2_types.h
+++ b/sys/dev/dpaa2/dpaa2_types.h
@@ -40,6 +40,11 @@
#define DPAA2_MAX_CHANNELS 16 /* CPU cores */
#define DPAA2_MAX_TCS 8 /* Traffic classes */
+#define DPAA2_TX_SEGLIMIT (16u) /* for 64 KiB frames */
+#define DPAA2_TX_SEG_SZ (PAGE_SIZE)
+#define DPAA2_TX_SEGS_MAXSZ (DPAA2_TX_SEGLIMIT * DPAA2_TX_SEG_SZ)
+#define DPAA2_TX_SGT_SZ (PAGE_SIZE) /* in bytes */
+
/**
* @brief Types of the DPAA2 devices.
*/
diff --git a/sys/dev/dwc/if_dwc.c b/sys/dev/dwc/if_dwc.c
index 21a520db8b89..cd8651cc99ff 100644
--- a/sys/dev/dwc/if_dwc.c
+++ b/sys/dev/dwc/if_dwc.c
@@ -527,13 +527,13 @@ dwc_attach(device_t dev)
sc->txpbl = pbl;
if (OF_getencprop(sc->node, "snps,rxpbl", &sc->rxpbl, sizeof(uint32_t)) <= 0)
sc->rxpbl = pbl;
- if (OF_hasprop(sc->node, "snps,no-pbl-x8") == 1)
+ if (OF_hasprop(sc->node, "snps,no-pbl-x8"))
sc->nopblx8 = true;
- if (OF_hasprop(sc->node, "snps,fixed-burst") == 1)
+ if (OF_hasprop(sc->node, "snps,fixed-burst"))
sc->fixed_burst = true;
- if (OF_hasprop(sc->node, "snps,mixed-burst") == 1)
+ if (OF_hasprop(sc->node, "snps,mixed-burst"))
sc->mixed_burst = true;
- if (OF_hasprop(sc->node, "snps,aal") == 1)
+ if (OF_hasprop(sc->node, "snps,aal"))
sc->aal = true;
error = clk_set_assigned(dev, ofw_bus_get_node(dev));
diff --git a/sys/dev/etherswitch/e6000sw/e6000sw.c b/sys/dev/etherswitch/e6000sw/e6000sw.c
index 7e9193f4ba47..248a13952d35 100644
--- a/sys/dev/etherswitch/e6000sw/e6000sw.c
+++ b/sys/dev/etherswitch/e6000sw/e6000sw.c
@@ -302,6 +302,10 @@ e6000sw_probe(device_t dev)
description = "Marvell 88E6352";
sc->num_ports = 7;
break;
+ case MV88E6171:
+ description = "Marvell 88E6171";
+ sc->num_ports = 7;
+ break;
case MV88E6172:
description = "Marvell 88E6172";
sc->num_ports = 7;
@@ -571,6 +575,8 @@ e6000sw_attach(device_t dev)
}
for (child = OF_child(ports); child != 0; child = OF_peer(child)) {
+ if (!ofw_bus_node_status_okay(child))
+ continue;
err = e6000sw_parse_child_fdt(sc, child, &port);
if (err != 0) {
device_printf(sc->dev, "failed to parse DTS\n");
diff --git a/sys/dev/etherswitch/e6000sw/e6000swreg.h b/sys/dev/etherswitch/e6000sw/e6000swreg.h
index ec4503faeec5..aef79ad9de5d 100644
--- a/sys/dev/etherswitch/e6000sw/e6000swreg.h
+++ b/sys/dev/etherswitch/e6000sw/e6000swreg.h
@@ -45,6 +45,7 @@ struct atu_opt {
#define MV88E6341 0x3410
#define MV88E6352 0x3520
#define MV88E6172 0x1720
+#define MV88E6171 0x1710
#define MV88E6176 0x1760
#define MV88E6190 0x1900
#define MV88E6190X 0x0a00
diff --git a/sys/dev/evdev/evdev_utils.c b/sys/dev/evdev/evdev_utils.c
index a075a9be9bb7..d7b7b790dc2c 100644
--- a/sys/dev/evdev/evdev_utils.c
+++ b/sys/dev/evdev/evdev_utils.c
@@ -92,8 +92,8 @@ static uint16_t evdev_usb_scancodes[256] = {
NONE, NONE, NONE, NONE,
NONE, NONE, NONE, NONE,
/* 0xc0 - 0xdf */
- NONE, NONE, NONE, NONE,
- NONE, NONE, NONE, NONE,
+ KEY_BRIGHTNESSDOWN, KEY_BRIGHTNESSUP, KEY_SCALE, KEY_DASHBOARD,
+ KEY_KBDILLUMDOWN, KEY_KBDILLUMUP, NONE, NONE,
NONE, NONE, NONE, NONE,
NONE, NONE, NONE, NONE,
NONE, NONE, NONE, NONE,
@@ -108,7 +108,12 @@ static uint16_t evdev_usb_scancodes[256] = {
KEY_WWW, KEY_BACK, KEY_FORWARD, KEY_STOP,
KEY_FIND, KEY_SCROLLUP, KEY_SCROLLDOWN, KEY_EDIT,
KEY_SLEEP, KEY_COFFEE, KEY_REFRESH, KEY_CALC,
- NONE, NONE, NONE, NONE,
+	/*
+	 * The last item maps to APPLE_FN_KEY in hkbd.c. KEY_WAKEUP is used
+	 * instead of KEY_FN because evdev translates the latter to a code too
+	 * high for xkb to parse.
+	 */
+ NONE, NONE, NONE, KEY_WAKEUP,
};
diff --git a/sys/dev/hid/appleir.c b/sys/dev/hid/appleir.c
new file mode 100644
index 000000000000..956ad27f6d70
--- /dev/null
+++ b/sys/dev/hid/appleir.c
@@ -0,0 +1,440 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2026 Abdelkader Boudih <freebsd@seuros.com>
+ */
+
+/*
+ * Apple IR Remote Control Driver
+ *
+ * HID driver for Apple IR receivers (USB HID, vendor 0x05ac).
+ * Supports Apple Remote and generic IR remotes using NEC protocol.
+ *
+ * The Apple Remote protocol was reverse-engineered by James McKenzie and
+ * others; key codes and packet format constants are derived from that work
+ * and are factual descriptions of the hardware protocol, not copied code.
+ * Linux reference (GPL-2.0, no code copied): drivers/hid/hid-appleir.c
+ *
+ * Apple Remote Protocol (proprietary):
+ * Key down: [0x25][0x87][0xee][remote_id][key_code]
+ * Key repeat: [0x26][0x87][0xee][remote_id][key_code]
+ * Battery low: [0x25][0x87][0xe0][remote_id][0x00]
+ * Key decode: (byte4 >> 1) & 0x0F -> keymap[index]
+ * Two-packet: bit 6 of key_code (0x40) set -> store index, use on next keydown
+ *
+ * Generic IR Protocol (NEC-style):
+ * Format: [0x26][0x7f][0x80][code][~code]
+ * Checksum: code + ~code = 0xFF
+ *
+ * NO hardware key-up events -- synthesize via 125ms callout timer.
+ */
+
+#include <sys/cdefs.h>
+
+#include "opt_hid.h"
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/callout.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/sysctl.h>
+
+#include <dev/evdev/input.h>
+#include <dev/evdev/evdev.h>
+
+#define HID_DEBUG_VAR appleir_debug
+#include <dev/hid/hid.h>
+#include <dev/hid/hidbus.h>
+#include "usbdevs.h"
+
+#ifdef HID_DEBUG
+static int appleir_debug = 0;
+
+static SYSCTL_NODE(_hw_hid, OID_AUTO, appleir, CTLFLAG_RW, 0,
+ "Apple IR Remote Control");
+SYSCTL_INT(_hw_hid_appleir, OID_AUTO, debug, CTLFLAG_RWTUN,
+ &appleir_debug, 0, "Debug level");
+#endif
+
+/* Protocol constants */
+#define APPLEIR_REPORT_LEN 5
+#define APPLEIR_KEY_MASK 0x0F
+#define APPLEIR_TWO_PKT_FLAG 0x40 /* bit 6: two-packet command */
+#define APPLEIR_KEYUP_TICKS MAX(1, hz / 8) /* 125ms */
+#define APPLEIR_TWOPKT_TICKS MAX(1, hz / 4) /* 250ms */
+
+/* Report type markers (byte 0) */
+#define APPLEIR_PKT_KEYDOWN 0x25 /* key down / battery low */
+#define APPLEIR_PKT_REPEAT 0x26 /* key repeat / NEC generic */
+
+/* Apple Remote signature (bytes 1-2) */
+#define APPLEIR_SIG_HI 0x87
+#define APPLEIR_SIG_KEYLO 0xee /* normal key event */
+#define APPLEIR_SIG_BATTLO 0xe0 /* battery low event */
+
+/* Generic IR NEC signature (bytes 1-2) */
+#define APPLEIR_NEC_HI 0x7f
+#define APPLEIR_NEC_LO 0x80
+#define APPLEIR_NEC_CHECKSUM 0xFF /* code + ~code must equal this */
+
+/*
+ * Apple IR keymap: 17 entries, index = (key_code >> 1) & 0x0F
+ * Based on Linux driver (hid-appleir.c) keymap.
+ */
+static const uint16_t appleir_keymap[] = {
+ KEY_RESERVED, /* 0x00 */
+ KEY_MENU, /* 0x01 - menu */
+ KEY_PLAYPAUSE, /* 0x02 - play/pause */
+ KEY_FORWARD, /* 0x03 - >> */
+ KEY_BACK, /* 0x04 - << */
+ KEY_VOLUMEUP, /* 0x05 - + */
+ KEY_VOLUMEDOWN, /* 0x06 - - */
+ KEY_RESERVED, /* 0x07 */
+ KEY_RESERVED, /* 0x08 */
+ KEY_RESERVED, /* 0x09 */
+ KEY_RESERVED, /* 0x0A */
+ KEY_RESERVED, /* 0x0B */
+ KEY_RESERVED, /* 0x0C */
+ KEY_RESERVED, /* 0x0D */
+ KEY_ENTER, /* 0x0E - middle button (two-packet) */
+ KEY_PLAYPAUSE, /* 0x0F - play/pause (two-packet) */
+ KEY_RESERVED, /* 0x10 - out of range guard */
+};
+#define APPLEIR_NKEYS (nitems(appleir_keymap))
+
+/*
+ * Generic IR keymap (NEC protocol codes).
+ * Maps raw NEC codes to evdev KEY_* codes.
+ */
+struct generic_ir_map {
+ uint8_t code; /* NEC IR code */
+ uint16_t key; /* evdev KEY_* */
+};
+
+static const struct generic_ir_map generic_keymap[] = {
+ { 0xe1, KEY_VOLUMEUP },
+ { 0xe9, KEY_VOLUMEDOWN },
+ { 0xed, KEY_CHANNELUP },
+ { 0xf3, KEY_CHANNELDOWN },
+ { 0xf5, KEY_PLAYPAUSE },
+ { 0xf9, KEY_POWER },
+ { 0xfb, KEY_MUTE },
+ { 0xfe, KEY_OK },
+};
+#define GENERIC_NKEYS (nitems(generic_keymap))
+
+static uint16_t
+generic_ir_lookup(uint8_t code)
+{
+ int i;
+
+ for (i = 0; i < GENERIC_NKEYS; i++) {
+ if (generic_keymap[i].code == code)
+ return (generic_keymap[i].key);
+ }
+ return (KEY_RESERVED);
+}
+
+struct appleir_softc {
+ device_t sc_dev;
+ struct mtx sc_mtx; /* protects below + callout */
+ struct evdev_dev *sc_evdev;
+ struct callout sc_co; /* key-up timer */
+ struct callout sc_twoco; /* two-packet timeout */
+ uint16_t sc_current_key; /* evdev keycode (0=none) */
+ int sc_prev_key_idx;/* two-packet state (0=none) */
+ bool sc_batt_warned;
+};
+
+
+/*
+ * Callout: synthesize key-up event (no hardware key-up from remote).
+ * Runs with sc_mtx held (callout_init_mtx).
+ */
+static void
+appleir_keyup(void *arg)
+{
+ struct appleir_softc *sc = arg;
+
+ mtx_assert(&sc->sc_mtx, MA_OWNED);
+
+ if (sc->sc_current_key != 0) {
+ evdev_push_key(sc->sc_evdev, sc->sc_current_key, 0);
+ evdev_sync(sc->sc_evdev);
+ sc->sc_current_key = 0;
+ sc->sc_prev_key_idx = 0;
+ }
+}
+
+static void
+appleir_twopacket_timeout(void *arg)
+{
+ struct appleir_softc *sc = arg;
+
+ mtx_assert(&sc->sc_mtx, MA_OWNED);
+ sc->sc_prev_key_idx = 0;
+}
+
+/*
+ * Process 5-byte HID interrupt report.
+ * Called from hidbus interrupt context.
+ */
+static void
+appleir_intr(void *context, void *data, hid_size_t len)
+{
+ struct appleir_softc *sc = context;
+ uint8_t *buf = data;
+ uint8_t report[APPLEIR_REPORT_LEN];
+ int index;
+ uint16_t new_key;
+
+ if (len != APPLEIR_REPORT_LEN) {
+ DPRINTFN(1, "bad report len: %zu\n", (size_t)len);
+ return;
+ }
+
+ memcpy(report, buf, APPLEIR_REPORT_LEN);
+
+ mtx_lock(&sc->sc_mtx);
+
+ /* Battery low: [KEYDOWN][SIG_HI][SIG_BATTLO] -- log and ignore */
+ if (report[0] == APPLEIR_PKT_KEYDOWN &&
+ report[1] == APPLEIR_SIG_HI && report[2] == APPLEIR_SIG_BATTLO) {
+ if (!sc->sc_batt_warned) {
+ device_printf(sc->sc_dev,
+ "remote battery may be low\n");
+ sc->sc_batt_warned = true;
+ }
+ goto done;
+ }
+
+ /* Key down: [KEYDOWN][SIG_HI][SIG_KEYLO][remote_id][key_code] */
+ if (report[0] == APPLEIR_PKT_KEYDOWN &&
+ report[1] == APPLEIR_SIG_HI && report[2] == APPLEIR_SIG_KEYLO) {
+ /* Release previous key if held */
+ if (sc->sc_current_key != 0) {
+ evdev_push_key(sc->sc_evdev, sc->sc_current_key, 0);
+ evdev_sync(sc->sc_evdev);
+ sc->sc_current_key = 0;
+ }
+
+ if (sc->sc_prev_key_idx > 0) {
+ /* Second packet of a two-packet command */
+ index = sc->sc_prev_key_idx;
+ sc->sc_prev_key_idx = 0;
+ callout_stop(&sc->sc_twoco);
+ } else if (report[4] & APPLEIR_TWO_PKT_FLAG) {
+ /* First packet of a two-packet command -- wait for next */
+ sc->sc_prev_key_idx = (report[4] >> 1) & APPLEIR_KEY_MASK;
+ callout_reset(&sc->sc_twoco, APPLEIR_TWOPKT_TICKS,
+ appleir_twopacket_timeout, sc);
+ goto done;
+ } else {
+ index = (report[4] >> 1) & APPLEIR_KEY_MASK;
+ }
+
+ new_key = (index < APPLEIR_NKEYS) ?
+ appleir_keymap[index] : KEY_RESERVED;
+ if (new_key != KEY_RESERVED) {
+ sc->sc_current_key = new_key;
+ evdev_push_key(sc->sc_evdev, new_key, 1);
+ evdev_sync(sc->sc_evdev);
+ callout_reset(&sc->sc_co, APPLEIR_KEYUP_TICKS,
+ appleir_keyup, sc);
+ }
+ goto done;
+ }
+
+ /* Key repeat: [REPEAT][SIG_HI][SIG_KEYLO][remote_id][key_code] */
+ if (report[0] == APPLEIR_PKT_REPEAT &&
+ report[1] == APPLEIR_SIG_HI && report[2] == APPLEIR_SIG_KEYLO) {
+ uint16_t repeat_key;
+ int repeat_idx;
+
+ if (sc->sc_prev_key_idx > 0)
+ goto done;
+ if (report[4] & APPLEIR_TWO_PKT_FLAG)
+ goto done;
+
+ repeat_idx = (report[4] >> 1) & APPLEIR_KEY_MASK;
+ repeat_key = (repeat_idx < APPLEIR_NKEYS) ?
+ appleir_keymap[repeat_idx] : KEY_RESERVED;
+ if (repeat_key == KEY_RESERVED ||
+ repeat_key != sc->sc_current_key)
+ goto done;
+
+ evdev_push_key(sc->sc_evdev, repeat_key, 1);
+ evdev_sync(sc->sc_evdev);
+ callout_reset(&sc->sc_co, APPLEIR_KEYUP_TICKS,
+ appleir_keyup, sc);
+ goto done;
+ }
+
+ /* Generic IR (NEC protocol): [REPEAT][NEC_HI][NEC_LO][code][~code] */
+ if (report[0] == APPLEIR_PKT_REPEAT &&
+ report[1] == APPLEIR_NEC_HI && report[2] == APPLEIR_NEC_LO) {
+ uint8_t code = report[3];
+ uint8_t checksum = report[4];
+
+ sc->sc_prev_key_idx = 0;
+ callout_stop(&sc->sc_twoco);
+
+ if ((uint8_t)(code + checksum) != APPLEIR_NEC_CHECKSUM) {
+ DPRINTFN(1, "generic IR: bad checksum %02x+%02x\n",
+ code, checksum);
+ goto done;
+ }
+
+ new_key = generic_ir_lookup(code);
+ if (new_key == KEY_RESERVED)
+ goto done;
+
+ if (sc->sc_current_key != new_key) {
+ if (sc->sc_current_key != 0)
+ evdev_push_key(sc->sc_evdev,
+ sc->sc_current_key, 0);
+ sc->sc_current_key = new_key;
+ evdev_push_key(sc->sc_evdev, new_key, 1);
+ evdev_sync(sc->sc_evdev);
+ } else {
+ evdev_push_key(sc->sc_evdev, new_key, 1);
+ evdev_sync(sc->sc_evdev);
+ }
+ callout_reset(&sc->sc_co, APPLEIR_KEYUP_TICKS,
+ appleir_keyup, sc);
+ goto done;
+ }
+
+ DPRINTFN(1, "unknown report: %02x %02x %02x\n",
+ report[0], report[1], report[2]);
+
+done:
+ mtx_unlock(&sc->sc_mtx);
+}
+
+/* Apple IR receiver device IDs */
+static const struct hid_device_id appleir_devs[] = {
+ { HID_BVP(BUS_USB, USB_VENDOR_APPLE, 0x8240) },
+ { HID_BVP(BUS_USB, USB_VENDOR_APPLE, 0x8241) },
+ { HID_BVP(BUS_USB, USB_VENDOR_APPLE, 0x8242) },
+ { HID_BVP(BUS_USB, USB_VENDOR_APPLE, 0x8243) },
+ { HID_BVP(BUS_USB, USB_VENDOR_APPLE, 0x1440) },
+};
+
+static int
+appleir_probe(device_t dev)
+{
+ int error;
+
+ error = HIDBUS_LOOKUP_DRIVER_INFO(dev, appleir_devs);
+ if (error != 0)
+ return (error);
+
+ /* Only attach to first top-level collection (TLC index 0) */
+ if (hidbus_get_index(dev) != 0)
+ return (ENXIO);
+
+ hidbus_set_desc(dev, "Apple IR Receiver");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+appleir_attach(device_t dev)
+{
+ struct appleir_softc *sc = device_get_softc(dev);
+ const struct hid_device_info *hw;
+ int i, error;
+
+ sc->sc_dev = dev;
+ hw = hid_get_device_info(dev);
+ sc->sc_current_key = 0;
+ sc->sc_prev_key_idx = 0;
+ sc->sc_batt_warned = false;
+ mtx_init(&sc->sc_mtx, "appleir", NULL, MTX_DEF);
+ callout_init_mtx(&sc->sc_co, &sc->sc_mtx, 0);
+ callout_init_mtx(&sc->sc_twoco, &sc->sc_mtx, 0);
+
+ sc->sc_evdev = evdev_alloc();
+ evdev_set_name(sc->sc_evdev, device_get_desc(dev));
+ evdev_set_phys(sc->sc_evdev, device_get_nameunit(dev));
+ evdev_set_id(sc->sc_evdev, hw->idBus, hw->idVendor, hw->idProduct,
+ hw->idVersion);
+ evdev_set_serial(sc->sc_evdev, hw->serial);
+ evdev_support_event(sc->sc_evdev, EV_SYN);
+ evdev_support_event(sc->sc_evdev, EV_KEY);
+ evdev_support_event(sc->sc_evdev, EV_REP);
+
+ for (i = 0; i < APPLEIR_NKEYS; i++) {
+ if (appleir_keymap[i] != KEY_RESERVED)
+ evdev_support_key(sc->sc_evdev, appleir_keymap[i]);
+ }
+ for (i = 0; i < GENERIC_NKEYS; i++)
+ evdev_support_key(sc->sc_evdev, generic_keymap[i].key);
+
+ error = evdev_register_mtx(sc->sc_evdev, &sc->sc_mtx);
+ if (error != 0) {
+ device_printf(dev, "evdev_register_mtx failed: %d\n", error);
+ goto fail;
+ }
+
+ hidbus_set_intr(dev, appleir_intr, sc);
+
+ error = hid_intr_start(dev);
+ if (error != 0) {
+ device_printf(dev, "hid_intr_start failed: %d\n", error);
+ goto fail;
+ }
+
+ return (0);
+
+fail:
+ if (sc->sc_evdev != NULL)
+ evdev_free(sc->sc_evdev);
+ callout_drain(&sc->sc_co);
+ callout_drain(&sc->sc_twoco);
+ mtx_destroy(&sc->sc_mtx);
+ return (error);
+}
+
+static int
+appleir_detach(device_t dev)
+{
+ struct appleir_softc *sc = device_get_softc(dev);
+ int error;
+
+ error = hid_intr_stop(dev);
+ if (error != 0) {
+ device_printf(dev, "hid_intr_stop failed: %d\n", error);
+ return (error);
+ }
+ callout_drain(&sc->sc_co);
+ callout_drain(&sc->sc_twoco);
+ evdev_free(sc->sc_evdev);
+ mtx_destroy(&sc->sc_mtx);
+
+ return (0);
+}
+
+static device_method_t appleir_methods[] = {
+ DEVMETHOD(device_probe, appleir_probe),
+ DEVMETHOD(device_attach, appleir_attach),
+ DEVMETHOD(device_detach, appleir_detach),
+ DEVMETHOD_END
+};
+
+static driver_t appleir_driver = {
+ "appleir",
+ appleir_methods,
+ sizeof(struct appleir_softc)
+};
+
+DRIVER_MODULE(appleir, hidbus, appleir_driver, NULL, NULL);
+MODULE_DEPEND(appleir, hid, 1, 1, 1);
+MODULE_DEPEND(appleir, hidbus, 1, 1, 1);
+MODULE_DEPEND(appleir, evdev, 1, 1, 1);
+MODULE_VERSION(appleir, 1);
+HID_PNP_INFO(appleir_devs);
diff --git a/sys/dev/hid/bcm5974.c b/sys/dev/hid/bcm5974.c
index 442e8905e9bc..e2efeb08eb2e 100644
--- a/sys/dev/hid/bcm5974.c
+++ b/sys/dev/hid/bcm5974.c
@@ -120,6 +120,7 @@ enum tp_type {
/* list of device capability bits */
#define HAS_INTEGRATED_BUTTON 1
#define USES_COMPACT_REPORT 2
+#define SUPPORTS_FORCETOUCH 4
struct tp_type_params {
uint8_t caps; /* device capability bitmask */
@@ -146,13 +147,13 @@ struct tp_type_params {
.delta = 0,
},
[TYPE4] = {
- .caps = HAS_INTEGRATED_BUTTON,
+ .caps = HAS_INTEGRATED_BUTTON | SUPPORTS_FORCETOUCH,
.button = 31,
.offset = 23 * 2,
.delta = 2,
},
[TYPE_MT2U] = {
- .caps = HAS_INTEGRATED_BUTTON | USES_COMPACT_REPORT,
+ .caps = HAS_INTEGRATED_BUTTON | USES_COMPACT_REPORT | SUPPORTS_FORCETOUCH,
.button = 1,
.offset = 12,
.delta = 0,
@@ -407,6 +408,10 @@ static const struct bcm5974_dev_params bcm5974_dev_params[BCM5974_FLAG_MAX] = {
HID_BVPI(BUS_USB, USB_VENDOR_##v, USB_PRODUCT_##v##_##p, i), \
HID_TLC(BCM5974_TLC_PAGE, BCM5974_TLC_USAGE), \
}
+#define BCM5974_DEV_USB(v,p,i) { \
+ HID_BVPI(BUS_USB, USB_VENDOR_##v, USB_PRODUCT_##v##_##p, i), \
+ HID_TLC(HUP_VENDOR_00, 0x0001), \
+}
#define APPLE_HID "APP000D"
#define BCM5974_DEV_SPI(hid, i) { \
@@ -416,60 +421,60 @@ static const struct bcm5974_dev_params bcm5974_dev_params[BCM5974_FLAG_MAX] = {
static const struct hid_device_id bcm5974_devs[] = {
/* MacbookAir1.1 */
- BCM5974_DEV(APPLE, WELLSPRING_ANSI, BCM5974_FLAG_WELLSPRING1),
- BCM5974_DEV(APPLE, WELLSPRING_ISO, BCM5974_FLAG_WELLSPRING1),
- BCM5974_DEV(APPLE, WELLSPRING_JIS, BCM5974_FLAG_WELLSPRING1),
+ BCM5974_DEV_USB(APPLE, WELLSPRING_ANSI, BCM5974_FLAG_WELLSPRING1),
+ BCM5974_DEV_USB(APPLE, WELLSPRING_ISO, BCM5974_FLAG_WELLSPRING1),
+ BCM5974_DEV_USB(APPLE, WELLSPRING_JIS, BCM5974_FLAG_WELLSPRING1),
/* MacbookProPenryn, aka wellspring2 */
- BCM5974_DEV(APPLE, WELLSPRING2_ANSI, BCM5974_FLAG_WELLSPRING2),
- BCM5974_DEV(APPLE, WELLSPRING2_ISO, BCM5974_FLAG_WELLSPRING2),
- BCM5974_DEV(APPLE, WELLSPRING2_JIS, BCM5974_FLAG_WELLSPRING2),
+ BCM5974_DEV_USB(APPLE, WELLSPRING2_ANSI, BCM5974_FLAG_WELLSPRING2),
+ BCM5974_DEV_USB(APPLE, WELLSPRING2_ISO, BCM5974_FLAG_WELLSPRING2),
+ BCM5974_DEV_USB(APPLE, WELLSPRING2_JIS, BCM5974_FLAG_WELLSPRING2),
/* Macbook5,1 (unibody), aka wellspring3 */
- BCM5974_DEV(APPLE, WELLSPRING3_ANSI, BCM5974_FLAG_WELLSPRING3),
- BCM5974_DEV(APPLE, WELLSPRING3_ISO, BCM5974_FLAG_WELLSPRING3),
- BCM5974_DEV(APPLE, WELLSPRING3_JIS, BCM5974_FLAG_WELLSPRING3),
+ BCM5974_DEV_USB(APPLE, WELLSPRING3_ANSI, BCM5974_FLAG_WELLSPRING3),
+ BCM5974_DEV_USB(APPLE, WELLSPRING3_ISO, BCM5974_FLAG_WELLSPRING3),
+ BCM5974_DEV_USB(APPLE, WELLSPRING3_JIS, BCM5974_FLAG_WELLSPRING3),
/* MacbookAir3,2 (unibody), aka wellspring4 */
- BCM5974_DEV(APPLE, WELLSPRING4_ANSI, BCM5974_FLAG_WELLSPRING4),
- BCM5974_DEV(APPLE, WELLSPRING4_ISO, BCM5974_FLAG_WELLSPRING4),
- BCM5974_DEV(APPLE, WELLSPRING4_JIS, BCM5974_FLAG_WELLSPRING4),
+ BCM5974_DEV_USB(APPLE, WELLSPRING4_ANSI, BCM5974_FLAG_WELLSPRING4),
+ BCM5974_DEV_USB(APPLE, WELLSPRING4_ISO, BCM5974_FLAG_WELLSPRING4),
+ BCM5974_DEV_USB(APPLE, WELLSPRING4_JIS, BCM5974_FLAG_WELLSPRING4),
/* MacbookAir3,1 (unibody), aka wellspring4 */
- BCM5974_DEV(APPLE, WELLSPRING4A_ANSI, BCM5974_FLAG_WELLSPRING4A),
- BCM5974_DEV(APPLE, WELLSPRING4A_ISO, BCM5974_FLAG_WELLSPRING4A),
- BCM5974_DEV(APPLE, WELLSPRING4A_JIS, BCM5974_FLAG_WELLSPRING4A),
+ BCM5974_DEV_USB(APPLE, WELLSPRING4A_ANSI, BCM5974_FLAG_WELLSPRING4A),
+ BCM5974_DEV_USB(APPLE, WELLSPRING4A_ISO, BCM5974_FLAG_WELLSPRING4A),
+ BCM5974_DEV_USB(APPLE, WELLSPRING4A_JIS, BCM5974_FLAG_WELLSPRING4A),
/* Macbook8 (unibody, March 2011) */
- BCM5974_DEV(APPLE, WELLSPRING5_ANSI, BCM5974_FLAG_WELLSPRING5),
- BCM5974_DEV(APPLE, WELLSPRING5_ISO, BCM5974_FLAG_WELLSPRING5),
- BCM5974_DEV(APPLE, WELLSPRING5_JIS, BCM5974_FLAG_WELLSPRING5),
+ BCM5974_DEV_USB(APPLE, WELLSPRING5_ANSI, BCM5974_FLAG_WELLSPRING5),
+ BCM5974_DEV_USB(APPLE, WELLSPRING5_ISO, BCM5974_FLAG_WELLSPRING5),
+ BCM5974_DEV_USB(APPLE, WELLSPRING5_JIS, BCM5974_FLAG_WELLSPRING5),
/* MacbookAir4,1 (unibody, July 2011) */
- BCM5974_DEV(APPLE, WELLSPRING6A_ANSI, BCM5974_FLAG_WELLSPRING6A),
- BCM5974_DEV(APPLE, WELLSPRING6A_ISO, BCM5974_FLAG_WELLSPRING6A),
- BCM5974_DEV(APPLE, WELLSPRING6A_JIS, BCM5974_FLAG_WELLSPRING6A),
+ BCM5974_DEV_USB(APPLE, WELLSPRING6A_ANSI, BCM5974_FLAG_WELLSPRING6A),
+ BCM5974_DEV_USB(APPLE, WELLSPRING6A_ISO, BCM5974_FLAG_WELLSPRING6A),
+ BCM5974_DEV_USB(APPLE, WELLSPRING6A_JIS, BCM5974_FLAG_WELLSPRING6A),
/* MacbookAir4,2 (unibody, July 2011) */
- BCM5974_DEV(APPLE, WELLSPRING6_ANSI, BCM5974_FLAG_WELLSPRING6),
- BCM5974_DEV(APPLE, WELLSPRING6_ISO, BCM5974_FLAG_WELLSPRING6),
- BCM5974_DEV(APPLE, WELLSPRING6_JIS, BCM5974_FLAG_WELLSPRING6),
+ BCM5974_DEV_USB(APPLE, WELLSPRING6_ANSI, BCM5974_FLAG_WELLSPRING6),
+ BCM5974_DEV_USB(APPLE, WELLSPRING6_ISO, BCM5974_FLAG_WELLSPRING6),
+ BCM5974_DEV_USB(APPLE, WELLSPRING6_JIS, BCM5974_FLAG_WELLSPRING6),
/* Macbook8,2 (unibody) */
- BCM5974_DEV(APPLE, WELLSPRING5A_ANSI, BCM5974_FLAG_WELLSPRING5A),
- BCM5974_DEV(APPLE, WELLSPRING5A_ISO, BCM5974_FLAG_WELLSPRING5A),
- BCM5974_DEV(APPLE, WELLSPRING5A_JIS, BCM5974_FLAG_WELLSPRING5A),
+ BCM5974_DEV_USB(APPLE, WELLSPRING5A_ANSI, BCM5974_FLAG_WELLSPRING5A),
+ BCM5974_DEV_USB(APPLE, WELLSPRING5A_ISO, BCM5974_FLAG_WELLSPRING5A),
+ BCM5974_DEV_USB(APPLE, WELLSPRING5A_JIS, BCM5974_FLAG_WELLSPRING5A),
/* MacbookPro10,1 (unibody, June 2012) */
/* MacbookPro11,1-3 (unibody, June 2013) */
- BCM5974_DEV(APPLE, WELLSPRING7_ANSI, BCM5974_FLAG_WELLSPRING7),
- BCM5974_DEV(APPLE, WELLSPRING7_ISO, BCM5974_FLAG_WELLSPRING7),
- BCM5974_DEV(APPLE, WELLSPRING7_JIS, BCM5974_FLAG_WELLSPRING7),
+ BCM5974_DEV_USB(APPLE, WELLSPRING7_ANSI, BCM5974_FLAG_WELLSPRING7),
+ BCM5974_DEV_USB(APPLE, WELLSPRING7_ISO, BCM5974_FLAG_WELLSPRING7),
+ BCM5974_DEV_USB(APPLE, WELLSPRING7_JIS, BCM5974_FLAG_WELLSPRING7),
- /* MacbookPro10,2 (unibody, October 2012) */
- BCM5974_DEV(APPLE, WELLSPRING7A_ANSI, BCM5974_FLAG_WELLSPRING7A),
- BCM5974_DEV(APPLE, WELLSPRING7A_ISO, BCM5974_FLAG_WELLSPRING7A),
- BCM5974_DEV(APPLE, WELLSPRING7A_JIS, BCM5974_FLAG_WELLSPRING7A),
+ /* MacbookPro10,2 (unibody, October 2012) */
+ BCM5974_DEV_USB(APPLE, WELLSPRING7A_ANSI, BCM5974_FLAG_WELLSPRING7A),
+ BCM5974_DEV_USB(APPLE, WELLSPRING7A_ISO, BCM5974_FLAG_WELLSPRING7A),
+ BCM5974_DEV_USB(APPLE, WELLSPRING7A_JIS, BCM5974_FLAG_WELLSPRING7A),
/* MacbookAir6,2 (unibody, June 2013) */
BCM5974_DEV(APPLE, WELLSPRING8_ANSI, BCM5974_FLAG_WELLSPRING8),
@@ -748,7 +753,8 @@ bcm5974_attach(device_t dev)
BCM5974_ABS(sc->sc_evdev, ABS_MT_POSITION_X, sc->sc_params->x);
BCM5974_ABS(sc->sc_evdev, ABS_MT_POSITION_Y, sc->sc_params->y);
/* finger pressure */
- BCM5974_ABS(sc->sc_evdev, ABS_MT_PRESSURE, sc->sc_params->p);
+ if ((sc->sc_params->tp->caps & SUPPORTS_FORCETOUCH) != 0)
+ BCM5974_ABS(sc->sc_evdev, ABS_MT_PRESSURE, sc->sc_params->p);
/* finger touch area */
BCM5974_ABS(sc->sc_evdev, ABS_MT_TOUCH_MAJOR, sc->sc_params->w);
BCM5974_ABS(sc->sc_evdev, ABS_MT_TOUCH_MINOR, sc->sc_params->w);
diff --git a/sys/dev/hid/hgame.c b/sys/dev/hid/hgame.c
index 8dde6b5550c9..693c5afee034 100644
--- a/sys/dev/hid/hgame.c
+++ b/sys/dev/hid/hgame.c
@@ -134,28 +134,20 @@ hgame_dpad_cb(HIDMAP_CB_ARGS)
data = ctx.data;
switch (HIDMAP_CB_UDATA64) {
case HUG_D_PAD_UP:
- if (sc->dpad_down)
- return (ENOMSG);
- evdev_push_abs(evdev, ABS_HAT0Y, (data == 0) ? 0 : -1);
sc->dpad_up = (data != 0);
+ evdev_push_abs(evdev, ABS_HAT0Y, sc->dpad_down - sc->dpad_up);
break;
case HUG_D_PAD_DOWN:
- if (sc->dpad_up)
- return (ENOMSG);
- evdev_push_abs(evdev, ABS_HAT0Y, (data == 0) ? 0 : 1);
sc->dpad_down = (data != 0);
+ evdev_push_abs(evdev, ABS_HAT0Y, sc->dpad_down - sc->dpad_up);
break;
case HUG_D_PAD_RIGHT:
- if (sc->dpad_left)
- return (ENOMSG);
- evdev_push_abs(evdev, ABS_HAT0X, (data == 0) ? 0 : 1);
sc->dpad_right = (data != 0);
+ evdev_push_abs(evdev, ABS_HAT0X, sc->dpad_right - sc->dpad_left);
break;
case HUG_D_PAD_LEFT:
- if (sc->dpad_right)
- return (ENOMSG);
- evdev_push_abs(evdev, ABS_HAT0X, (data == 0) ? 0 : -1);
sc->dpad_left = (data != 0);
+ evdev_push_abs(evdev, ABS_HAT0X, sc->dpad_right - sc->dpad_left);
break;
}
break;
diff --git a/sys/dev/hid/hid.h b/sys/dev/hid/hid.h
index e56f8ffe772b..f164e48341ed 100644
--- a/sys/dev/hid/hid.h
+++ b/sys/dev/hid/hid.h
@@ -57,8 +57,14 @@
#define HUP_SCALE 0x008c
#define HUP_CAMERA_CONTROL 0x0090
#define HUP_ARCADE 0x0091
+#define HUP_RESERVED_FF 0x00ff
#define HUP_FIDO 0xf1d0
-#define HUP_MICROSOFT 0xff00
+#define HUP_VENDOR_00 0xff00
+#define HUP_VENDOR_01 0xff01
+/* XXX compat */
+#define HUP_APPLE HUP_RESERVED_FF
+#define HUP_MICROSOFT HUP_VENDOR_00
+#define HUP_HP HUP_VENDOR_01
/* Usages, generic desktop */
#define HUG_POINTER 0x0001
diff --git a/sys/dev/hid/hkbd.c b/sys/dev/hid/hkbd.c
index 6255c42d3b62..c98f4be69169 100644
--- a/sys/dev/hid/hkbd.c
+++ b/sys/dev/hid/hkbd.c
@@ -73,6 +73,8 @@
#include <dev/hid/hidquirk.h>
#include <dev/hid/hidrdesc.h>
+#include "usbdevs.h"
+
#ifdef EVDEV_SUPPORT
#include <dev/evdev/input.h>
#include <dev/evdev/evdev.h>
@@ -97,6 +99,7 @@
static int hkbd_debug = 0;
#endif
static int hkbd_no_leds = 0;
+static int hkbd_apple_fn_mode = 0;
static SYSCTL_NODE(_hw_hid, OID_AUTO, hkbd, CTLFLAG_RW, 0, "USB keyboard");
#ifdef HID_DEBUG
@@ -105,6 +108,8 @@ SYSCTL_INT(_hw_hid_hkbd, OID_AUTO, debug, CTLFLAG_RWTUN,
#endif
SYSCTL_INT(_hw_hid_hkbd, OID_AUTO, no_leds, CTLFLAG_RWTUN,
&hkbd_no_leds, 0, "Disables setting of keyboard leds");
+SYSCTL_INT(_hw_hid_hkbd, OID_AUTO, apple_fn_mode, CTLFLAG_RWTUN,
+ &hkbd_apple_fn_mode, 0, "0 = Fn + F1..12 -> media, 1 = F1..F12 -> media");
#define INPUT_EPOCH global_epoch_preempt
@@ -126,6 +131,10 @@ SYSCTL_INT(_hw_hid_hkbd, OID_AUTO, no_leds, CTLFLAG_RWTUN,
#define MOD_MIN 0xe0
#define MOD_MAX 0xe7
+/* check evdev_usb_scancodes[] for names */
+#define APPLE_FN_KEY 0xff
+#define APPLE_EJECT_KEY 0xec
+
struct hkbd_softc {
device_t sc_dev;
@@ -289,9 +298,9 @@ static const uint8_t hkbd_trtab[256] = {
NN, NN, NN, NN, NN, NN, NN, NN, /* D0 - D7 */
NN, NN, NN, NN, NN, NN, NN, NN, /* D8 - DF */
29, 42, 56, 105, 90, 54, 93, 106, /* E0 - E7 */
- NN, NN, NN, NN, NN, NN, NN, NN, /* E8 - EF */
+ NN, NN, NN, NN, 254, NN, NN, NN, /* E8 - EF */
NN, NN, NN, NN, NN, NN, NN, NN, /* F0 - F7 */
- NN, NN, NN, NN, NN, NN, NN, NN, /* F8 - FF */
+ NN, NN, NN, NN, NN, NN, NN, 255, /* F8 - FF */
};
static const uint8_t hkbd_boot_desc[] = { HID_KBD_BOOTPROTO_DESCR() };
@@ -516,13 +525,14 @@ hkbd_interrupt(struct hkbd_softc *sc)
continue;
hkbd_put_key(sc, key | KEY_PRESS);
- sc->sc_co_basetime = sbinuptime();
- sc->sc_delay = sc->sc_kbd.kb_delay1;
- hkbd_start_timer(sc);
-
- /* set repeat time for last key */
- sc->sc_repeat_time = now + sc->sc_kbd.kb_delay1;
- sc->sc_repeat_key = key;
+ if (key != APPLE_FN_KEY) {
+ sc->sc_co_basetime = sbinuptime();
+ sc->sc_delay = sc->sc_kbd.kb_delay1;
+ hkbd_start_timer(sc);
+ /* set repeat time for last key */
+ sc->sc_repeat_time = now + sc->sc_kbd.kb_delay1;
+ sc->sc_repeat_key = key;
+ }
}
/* synchronize old data with new data */
@@ -613,6 +623,16 @@ static uint32_t
hkbd_apple_fn(uint32_t keycode)
{
switch (keycode) {
+ case 0x0b: return 0x50; /* H -> LEFT ARROW */
+ case 0x0d: return 0x51; /* J -> DOWN ARROW */
+ case 0x0e: return 0x52; /* K -> UP ARROW */
+ case 0x0f: return 0x4f; /* L -> RIGHT ARROW */
+ case 0x36: return 0x4a; /* COMMA -> HOME */
+ case 0x37: return 0x4d; /* DOT -> END */
+ case 0x18: return 0x4b; /* U -> PGUP */
+ case 0x07: return 0x4e; /* D -> PGDN */
+ case 0x16: return 0x47; /* S -> SCROLLLOCK */
+ case 0x13: return 0x46; /* P -> SYSRQ/PRTSC */
case 0x28: return 0x49; /* RETURN -> INSERT */
case 0x2a: return 0x4c; /* BACKSPACE -> DEL */
case 0x50: return 0x4a; /* LEFT ARROW -> HOME */
@@ -623,6 +643,27 @@ hkbd_apple_fn(uint32_t keycode)
}
}
+/* separate so the sysctl doesn't butcher non-fn keys */
+static uint32_t
+hkbd_apple_fn_media(uint32_t keycode)
+{
+ switch (keycode) {
+ case 0x3a: return 0xc0; /* F1 -> BRIGHTNESS DOWN */
+ case 0x3b: return 0xc1; /* F2 -> BRIGHTNESS UP */
+ case 0x3c: return 0xc2; /* F3 -> SCALE (MISSION CTRL) */
+ case 0x3d: return 0xc3; /* F4 -> DASHBOARD (LAUNCHPAD) */
+ case 0x3e: return 0xc4; /* F5 -> KBD BACKLIGHT DOWN */
+ case 0x3f: return 0xc5; /* F6 -> KBD BACKLIGHT UP */
+ case 0x40: return 0xea; /* F7 -> MEDIA PREV */
+ case 0x41: return 0xe8; /* F8 -> PLAY/PAUSE */
+ case 0x42: return 0xeb; /* F9 -> MEDIA NEXT */
+ case 0x43: return 0xef; /* F10 -> MUTE */
+ case 0x44: return 0xee; /* F11 -> VOLUME DOWN */
+ case 0x45: return 0xed; /* F12 -> VOLUME UP */
+ default: return keycode;
+ }
+}
+
static uint32_t
hkbd_apple_swap(uint32_t keycode)
{
@@ -675,18 +716,30 @@ hkbd_intr_callback(void *context, void *data, hid_size_t len)
/* clear modifiers */
modifiers = 0;
- /* scan through HID data */
+ /* scan through HID data and expose magic apple keys */
if ((sc->sc_flags & HKBD_FLAG_APPLE_EJECT) &&
(id == sc->sc_id_apple_eject)) {
- if (hid_get_data(buf, len, &sc->sc_loc_apple_eject))
+ if (hid_get_data(buf, len, &sc->sc_loc_apple_eject)) {
+ bit_set(sc->sc_ndata, APPLE_EJECT_KEY);
modifiers |= MOD_EJECT;
+ } else {
+ bit_clear(sc->sc_ndata, APPLE_EJECT_KEY);
+ }
}
if ((sc->sc_flags & HKBD_FLAG_APPLE_FN) &&
(id == sc->sc_id_apple_fn)) {
- if (hid_get_data(buf, len, &sc->sc_loc_apple_fn))
+ if (hid_get_data(buf, len, &sc->sc_loc_apple_fn)) {
+ bit_set(sc->sc_ndata, APPLE_FN_KEY);
modifiers |= MOD_FN;
+ } else {
+ bit_clear(sc->sc_ndata, APPLE_FN_KEY);
+ }
}
+ int apply_apple_fn_media = (modifiers & MOD_FN) ? 1 : 0;
+ if (hkbd_apple_fn_mode) /* toggle from sysctl value */
+ apply_apple_fn_media = !apply_apple_fn_media;
+
bit_foreach(sc->sc_loc_key_valid, HKBD_NKEYCODE, i) {
if (id != sc->sc_id_loc_key[i]) {
continue; /* invalid HID ID */
@@ -710,6 +763,8 @@ hkbd_intr_callback(void *context, void *data, hid_size_t len)
}
if (modifiers & MOD_FN)
key = hkbd_apple_fn(key);
+ if (apply_apple_fn_media)
+ key = hkbd_apple_fn_media(key);
if (sc->sc_flags & HKBD_FLAG_APPLE_SWAP)
key = hkbd_apple_swap(key);
if (key == KEY_NONE || key >= HKBD_NKEYCODE)
@@ -723,6 +778,8 @@ hkbd_intr_callback(void *context, void *data, hid_size_t len)
if (modifiers & MOD_FN)
key = hkbd_apple_fn(key);
+ if (apply_apple_fn_media)
+ key = hkbd_apple_fn_media(key);
if (sc->sc_flags & HKBD_FLAG_APPLE_SWAP)
key = hkbd_apple_swap(key);
if (key == KEY_NONE || key == KEY_ERROR || key >= HKBD_NKEYCODE)
@@ -783,25 +840,43 @@ hkbd_parse_hid(struct hkbd_softc *sc, const uint8_t *ptr, uint32_t len,
sc->sc_kbd_size = hid_report_size_max(ptr, len,
hid_input, &sc->sc_kbd_id);
+ const struct hid_device_info *hw = hid_get_device_info(sc->sc_dev);
+
/* investigate if this is an Apple Keyboard */
- if (hidbus_locate(ptr, len,
- HID_USAGE2(HUP_CONSUMER, HUG_APPLE_EJECT),
- hid_input, tlc_index, 0, &sc->sc_loc_apple_eject, &flags,
- &sc->sc_id_apple_eject, NULL)) {
- if (flags & HIO_VARIABLE)
- sc->sc_flags |= HKBD_FLAG_APPLE_EJECT |
- HKBD_FLAG_APPLE_SWAP;
- DPRINTFN(1, "Found Apple eject-key\n");
- }
- if (hidbus_locate(ptr, len,
- HID_USAGE2(0xFFFF, 0x0003),
- hid_input, tlc_index, 0, &sc->sc_loc_apple_fn, &flags,
- &sc->sc_id_apple_fn, NULL)) {
- if (flags & HIO_VARIABLE)
- sc->sc_flags |= HKBD_FLAG_APPLE_FN;
- DPRINTFN(1, "Found Apple FN-key\n");
+ if (hw->idVendor == USB_VENDOR_APPLE) { /* belt & braces! */
+ if (hidbus_locate(ptr, len,
+ HID_USAGE2(HUP_CONSUMER, HUG_APPLE_EJECT),
+ hid_input, tlc_index, 0, &sc->sc_loc_apple_eject, &flags,
+ &sc->sc_id_apple_eject, NULL)) {
+ if (flags & HIO_VARIABLE)
+ sc->sc_flags |= HKBD_FLAG_APPLE_EJECT |
+ HKBD_FLAG_APPLE_SWAP;
+ DPRINTFN(1, "Found Apple eject-key\n");
+ }
+ /*
+ * check the same vendor pages that linux does to find the one
+ * apple uses for the function key.
+ */
+ static const uint16_t apple_pages[] = {
+ HUP_APPLE, /* HID_UP_CUSTOM in linux */
+ HUP_MICROSOFT, /* HID_UP_MSVENDOR in linux */
+ HUP_HP, /* HID_UP_HPVENDOR2 in linux */
+ 0xFFFF /* Original FreeBSD check (Remove?) */
+ };
+ for (int i = 0; i < (int)nitems(apple_pages); i++) {
+ if (hidbus_locate(ptr, len,
+ HID_USAGE2(apple_pages[i], 0x0003),
+ hid_input, tlc_index, 0, &sc->sc_loc_apple_fn, &flags,
+ &sc->sc_id_apple_fn, NULL)) {
+ if (flags & HIO_VARIABLE)
+ sc->sc_flags |= HKBD_FLAG_APPLE_FN;
+ DPRINTFN(1, "Found Apple FN-key on page 0x%04x\n",
+ apple_pages[i]);
+ break;
+ }
+ }
}
-
+
/* figure out event buffer */
if (hidbus_locate(ptr, len,
HID_USAGE2(HUP_KEYBOARD, 0x00),
diff --git a/sys/dev/hwpmc/hwpmc_amd.c b/sys/dev/hwpmc/hwpmc_amd.c
index cf44f9362a72..51505bfcff37 100644
--- a/sys/dev/hwpmc/hwpmc_amd.c
+++ b/sys/dev/hwpmc/hwpmc_amd.c
@@ -60,8 +60,8 @@ struct amd_descr {
};
static int amd_npmcs;
+static int amd_core_npmcs, amd_l3_npmcs, amd_df_npmcs;
static struct amd_descr amd_pmcdesc[AMD_NPMCS_MAX];
-
struct amd_event_code_map {
enum pmc_event pe_ev; /* enum value */
uint16_t pe_code; /* encoded event mask */
@@ -664,10 +664,55 @@ amd_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
static int
amd_get_msr(int ri, uint32_t *msr)
{
+ int df_idx;
+
KASSERT(ri >= 0 && ri < amd_npmcs,
("[amd,%d] ri %d out of range", __LINE__, ri));
- *msr = amd_pmcdesc[ri].pm_perfctr - AMD_PMC_PERFCTR_0;
+ /*
+ * Map counter row index to RDPMC ECX value.
+ *
+ * AMD BKDG 24594 rev 3.37, page 440,
+ * "RDPMC Read Performance-Monitoring Counter":
+ * ECX 0-5: Core counters 0-5
+ * ECX 6-9: DF/Northbridge counters 0-3
+ * ECX 10-15: L3 Cache counters 0-5
+ * ECX 16-27: DF/Northbridge counters 4-15
+ *
+ * AMD PPR 57930-A0 section 2.1.9,
+ * "Register Sharing" for DF counter details.
+ */
+ if (ri < amd_core_npmcs) {
+ /* ECX 0-5: Core counters */
+ *msr = ri;
+ } else if (ri < amd_core_npmcs + amd_l3_npmcs) {
+ /* ECX 10-15: L3 Cache counters */
+ *msr = 10 + (ri - amd_core_npmcs);
+ } else {
+ /* ECX 6-9: DF counters 0-3
+ * ECX 16-27: DF counters 4-15 */
+ df_idx = ri - amd_core_npmcs - amd_l3_npmcs;
+ if (df_idx < 4)
+ *msr = 6 + df_idx;
+ else if (df_idx < 16)
+ *msr = 16 + (df_idx - 4);
+ else
+ return (EINVAL);
+ }
+ return (0);
+}
+
+/*
+ * Return the capabilities of the given PMC.
+ */
+static int
+amd_get_caps(int ri, uint32_t *caps)
+{
+ KASSERT(ri >= 0 && ri < amd_npmcs,
+ ("[amd,%d] ri %d out of range", __LINE__, ri));
+
+ *caps = amd_pmcdesc[ri].pm_descr.pd_caps;
+
return (0);
}
@@ -767,7 +812,6 @@ pmc_amd_initialize(void)
enum pmc_cputype cputype;
int error, i, ncpus, nclasses;
int family, model, stepping;
- int amd_core_npmcs, amd_l3_npmcs, amd_df_npmcs;
struct amd_descr *d;
/*
@@ -928,6 +972,7 @@ pmc_amd_initialize(void)
pcd->pcd_start_pmc = amd_start_pmc;
pcd->pcd_stop_pmc = amd_stop_pmc;
pcd->pcd_write_pmc = amd_write_pmc;
+ pcd->pcd_get_caps = amd_get_caps;
pmc_mdep->pmd_cputype = cputype;
pmc_mdep->pmd_intr = amd_intr;
diff --git a/sys/dev/hwpmc/hwpmc_ibs.c b/sys/dev/hwpmc/hwpmc_ibs.c
index 66d3260cf040..a230288f157e 100644
--- a/sys/dev/hwpmc/hwpmc_ibs.c
+++ b/sys/dev/hwpmc/hwpmc_ibs.c
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2026, Ali Jose Mashtizadeh
- * All rights reserved.
+ * Copyright (c) 2026, Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -271,7 +270,7 @@ ibs_stop_pmc(int cpu __diagused, int ri, struct pmc *pm)
* Turn off the ENABLE bit, but unfortunately there are a few quirks
* that generate excess NMIs. Workaround #420 in the Revision Guide
* for AMD Family 10h Processors 41322 Rev. 3.92 March 2012. requires
- * that we clear the count before clearing enable.
+ * that we clear the max count before clearing enable.
*
* Even after clearing the counter spurious NMIs are still possible so
* we use a per-CPU atomic variable to notify the interrupt handler we
@@ -291,7 +290,7 @@ ibs_stop_pmc(int cpu __diagused, int ri, struct pmc *pm)
wrmsr(IBS_FETCH_CTL, config);
break;
case IBS_PMC_OP:
- wrmsr(IBS_FETCH_CTL, config & ~IBS_FETCH_CTL_MAXCNTMASK);
+ wrmsr(IBS_OP_CTL, config & ~IBS_OP_CTL_MAXCNTMASK);
DELAY(1);
config &= ~IBS_OP_CTL_ENABLE;
wrmsr(IBS_OP_CTL, config);
@@ -343,6 +342,8 @@ pmc_ibs_process_fetch(struct pmc *pm, struct trapframe *tf, uint64_t config)
}
pmc_process_interrupt_mp(PMC_HR, pm, tf, &mpd);
+
+ wrmsr(IBS_FETCH_CTL, pm->pm_md.pm_ibs.ibs_ctl | IBS_FETCH_CTL_ENABLE);
}
static void
diff --git a/sys/dev/hwpmc/hwpmc_ibs.h b/sys/dev/hwpmc/hwpmc_ibs.h
index 4449b44c8368..1616a746ffef 100644
--- a/sys/dev/hwpmc/hwpmc_ibs.h
+++ b/sys/dev/hwpmc/hwpmc_ibs.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2026, Ali Jose Mashtizadeh
- * All rights reserved.
+ * Copyright (c) 2026, Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -67,6 +66,18 @@
#define IBS_CTL_LVTOFFSETVALID (1ULL << 8)
#define IBS_CTL_LVTOFFSETMASK 0x0000000F
+/*
+ * The minimum sampling rate was selected to match the default used by other
+ * counters that was also found to be experimentally stable by providing enough
+ * time between consecutive NMIs. The maximum sample rate is determined by
+ * setting all available counter bits, i.e., all available bits except the
+ * bottom four that are zero extended.
+ */
+#define IBS_FETCH_MIN_RATE 65536
+#define IBS_FETCH_MAX_RATE 1048560
+#define IBS_OP_MIN_RATE 65536
+#define IBS_OP_MAX_RATE 134217712
+
/* IBS Fetch Control */
#define IBS_FETCH_CTL 0xC0011030 /* IBS Fetch Control */
#define IBS_FETCH_CTL_L3MISS (1ULL << 61) /* L3 Cache Miss */
@@ -82,7 +93,8 @@
#define IBS_FETCH_CTL_ENABLE (1ULL << 48) /* Enable */
#define IBS_FETCH_CTL_MAXCNTMASK 0x0000FFFFULL
-#define IBS_FETCH_CTL_TO_LAT(_c) ((_c >> 32) & 0x0000FFFF)
+#define IBS_FETCH_INTERVAL_TO_CTL(_c) (((_c) >> 4) & 0x0000FFFF)
+#define IBS_FETCH_CTL_TO_LAT(_c) (((_c) >> 32) & 0x0000FFFF)
#define IBS_FETCH_LINADDR 0xC0011031 /* Fetch Linear Address */
#define IBS_FETCH_PHYSADDR 0xC0011032 /* Fetch Physical Address */
@@ -95,11 +107,15 @@
/* IBS Execution Control */
#define IBS_OP_CTL 0xC0011033 /* IBS Execution Control */
+#define IBS_OP_CTL_LATFLTEN (1ULL << 63) /* Load Latency Filtering */
#define IBS_OP_CTL_COUNTERCONTROL (1ULL << 19) /* Counter Control */
#define IBS_OP_CTL_VALID (1ULL << 18) /* Valid */
#define IBS_OP_CTL_ENABLE (1ULL << 17) /* Enable */
#define IBS_OP_CTL_L3MISSONLY (1ULL << 16) /* L3 Miss Filtering */
-#define IBS_OP_CTL_MAXCNTMASK 0x0000FFFFULL
+#define IBS_OP_CTL_MAXCNTMASK 0x07F0FFFFULL
+
+#define IBS_OP_CTL_LDLAT_TO_CTL(_c) ((((_c) >> 7) - 1) << 59)
+#define IBS_OP_INTERVAL_TO_CTL(_c) ((((_c) >> 4) & 0x0000FFFFULL) | ((_c) & 0x07F00000))
#define IBS_OP_RIP 0xC0011034 /* IBS Op RIP */
#define IBS_OP_DATA 0xC0011035 /* IBS Op Data */
diff --git a/sys/dev/hwpmc/hwpmc_intel.c b/sys/dev/hwpmc/hwpmc_intel.c
index e1788a9ea409..942cadfae4cf 100644
--- a/sys/dev/hwpmc/hwpmc_intel.c
+++ b/sys/dev/hwpmc/hwpmc_intel.c
@@ -247,6 +247,12 @@ pmc_intel_initialize(void)
cputype = PMC_CPU_INTEL_ATOM_TREMONT;
nclasses = 3;
break;
+ case 0xAA:
+ case 0xAC:
+ case 0xB5:
+ cputype = PMC_CPU_INTEL_METEOR_LAKE;
+ nclasses = 3;
+ break;
case 0xAD:
case 0xAE:
cputype = PMC_CPU_INTEL_GRANITE_RAPIDS;
diff --git a/sys/dev/hwpmc/hwpmc_logging.c b/sys/dev/hwpmc/hwpmc_logging.c
index 8fd7ef06a977..4f507523b6ab 100644
--- a/sys/dev/hwpmc/hwpmc_logging.c
+++ b/sys/dev/hwpmc/hwpmc_logging.c
@@ -58,6 +58,7 @@
#include <sys/uio.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
+#include <sys/syslog.h>
#if defined(__i386__) || defined(__amd64__)
#include <machine/clock.h>
@@ -1236,24 +1237,39 @@ pmclog_initialize(void)
struct pmclog_buffer *plb;
int domain, ncpus, total;
- if (pmclog_buffer_size <= 0 || pmclog_buffer_size > 16*1024) {
- (void) printf("hwpmc: tunable logbuffersize=%d must be "
- "greater than zero and less than or equal to 16MB.\n",
- pmclog_buffer_size);
+ if (pmclog_buffer_size <= 0 ||
+ pmclog_buffer_size > PMC_LOG_BUFFER_SIZE_MAX) {
+ log(LOG_WARNING,
+ "hwpmc: logbuffersize=%d must be greater than zero "
+ "and less than or equal to %d, resetting to %d\n",
+ pmclog_buffer_size, PMC_LOG_BUFFER_SIZE_MAX,
+ PMC_LOG_BUFFER_SIZE);
+
pmclog_buffer_size = PMC_LOG_BUFFER_SIZE;
}
if (pmc_nlogbuffers_pcpu <= 0) {
- (void) printf("hwpmc: tunable nlogbuffers=%d must be greater "
- "than zero.\n", pmc_nlogbuffers_pcpu);
+ log(LOG_WARNING,
+ "hwpmc: nbuffers_pcpu=%d must be greater than zero, "
+ "resetting to %d\n",
+ pmc_nlogbuffers_pcpu, PMC_NLOGBUFFERS_PCPU);
+
pmc_nlogbuffers_pcpu = PMC_NLOGBUFFERS_PCPU;
}
- if (pmc_nlogbuffers_pcpu*pmclog_buffer_size > 32*1024) {
- (void) printf("hwpmc: memory allocated pcpu must be less than 32MB (is %dK).\n",
- pmc_nlogbuffers_pcpu*pmclog_buffer_size);
+
+ if (pmc_nlogbuffers_pcpu * pmclog_buffer_size >
+ PMC_NLOGBUFFERS_PCPU_MEM_MAX) {
+ log(LOG_WARNING,
+ "hwpmc: nbuffers_pcpu=%d * logbuffersize=%d exceeds "
+ "%dMB per CPU limit, resetting to defaults (%d * %d)\n",
+ pmc_nlogbuffers_pcpu, pmclog_buffer_size,
+ PMC_NLOGBUFFERS_PCPU_MEM_MAX / 1024,
+ PMC_NLOGBUFFERS_PCPU, PMC_LOG_BUFFER_SIZE);
+
pmc_nlogbuffers_pcpu = PMC_NLOGBUFFERS_PCPU;
pmclog_buffer_size = PMC_LOG_BUFFER_SIZE;
}
+
for (domain = 0; domain < vm_ndomains; domain++) {
ncpus = pmc_dom_hdrs[domain]->pdbh_ncpus;
total = ncpus * pmc_nlogbuffers_pcpu;
@@ -1270,6 +1286,7 @@ pmclog_initialize(void)
pmc_plb_rele_unlocked(plb);
}
}
+
mtx_init(&pmc_kthread_mtx, "pmc-kthread", "pmc-sleep", MTX_DEF);
}
diff --git a/sys/dev/hwpmc/hwpmc_mod.c b/sys/dev/hwpmc/hwpmc_mod.c
index 1fa021429c5a..9533cb81b4a1 100644
--- a/sys/dev/hwpmc/hwpmc_mod.c
+++ b/sys/dev/hwpmc/hwpmc_mod.c
@@ -198,7 +198,6 @@ static int pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS);
static int pmc_debugflags_parse(char *newstr, char *fence);
#endif
-static bool pmc_is_multipart(struct pmc_sample *ps);
static void pmc_multipart_add(struct pmc_sample *ps, int type,
int length);
static void pmc_multipart_copydata(struct pmc_sample *ps,
@@ -818,11 +817,9 @@ pmc_force_context_switch(void)
uint64_t
pmc_rdtsc(void)
{
-#if defined(__i386__) || defined(__amd64__)
- if (__predict_true(amd_feature & AMDID_RDTSCP))
- return (rdtscp());
- else
- return (rdtsc());
+#if defined(__i386__)
+ /* Unfortunately get_cyclecount on i386 uses cpu_ticks. */
+ return (rdtsc());
#else
return (get_cyclecount());
#endif
@@ -4538,6 +4535,51 @@ pmc_syscall_handler(struct thread *td, void *syscall_args)
}
break;
+ /*
+ * Get the PMC capabilities
+ */
+
+ case PMC_OP_GETCAPS:
+ {
+ struct pmc_op_caps c;
+ struct pmc *pm;
+ struct pmc_classdep *pcd;
+ pmc_id_t pmcid;
+ int adjri, ri;
+
+ PMC_DOWNGRADE_SX();
+
+ if ((error = copyin(arg, &c, sizeof(c))) != 0)
+ break;
+
+ pmcid = c.pm_pmcid;
+
+ if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
+ break;
+
+ KASSERT(pmcid == pm->pm_id,
+ ("[pmc,%d] pmc id %x != pmcid %x", __LINE__,
+ pm->pm_id, pmcid));
+
+ ri = PMC_TO_ROWINDEX(pm);
+ pcd = pmc_ri_to_classdep(md, ri, &adjri);
+
+ /*
+ * If PMC class has no GETCAPS return the class capabilities
+ * otherwise get the per counter capabilities.
+ */
+ if (pcd->pcd_get_caps == NULL) {
+ c.pm_caps = pcd->pcd_caps;
+ } else {
+ error = (*pcd->pcd_get_caps)(adjri, &c.pm_caps);
+ if (error < 0)
+ break;
+ }
+
+ if ((error = copyout(&c, arg, sizeof(c))) < 0)
+ break;
+ }
+ break;
default:
error = EINVAL;
@@ -4593,12 +4635,6 @@ pmc_post_callchain_callback(void)
return;
}
-static bool
-pmc_is_multipart(struct pmc_sample *ps)
-{
- return ((ps->ps_flags & PMC_CC_F_MULTIPART) != 0);
-}
-
static void
pmc_multipart_add(struct pmc_sample *ps, int type, int length)
{
diff --git a/sys/dev/hyperv/vmbus/x86/hyperv_reg.h b/sys/dev/hyperv/vmbus/x86/hyperv_reg.h
index 0597a1fea953..e7560d00f25e 100644
--- a/sys/dev/hyperv/vmbus/x86/hyperv_reg.h
+++ b/sys/dev/hyperv/vmbus/x86/hyperv_reg.h
@@ -45,4 +45,10 @@
#define CPUID_LEAF_HV_IDENTITY 0x40000002
#define CPUID_LEAF_HV_FEATURES 0x40000003
#define CPUID_LEAF_HV_RECOMMENDS 0x40000004
+
+#define CPUID_LEAF_HV_STACK_INTERFACE 0x40000081
+#define HYPERV_STACK_INTERFACE_EAX_SIG 0x31235356 /* "VS#1" */
+#define CPUID_LEAF_HV_STACK_PROPERTIES 0x40000082
+#define HYPERV_PROPERTIES_EXT_DEST_ID 0x00000004
+
#endif /* !_HYPERV_REG_H_ */
diff --git a/sys/dev/ichwd/i6300esbwd.c b/sys/dev/ichwd/i6300esbwd.c
index 03d504a350aa..e810dcd888c4 100644
--- a/sys/dev/ichwd/i6300esbwd.c
+++ b/sys/dev/ichwd/i6300esbwd.c
@@ -112,7 +112,6 @@ i6300esbwd_event(void *arg, unsigned int cmd, int *error)
cmd &= WD_INTERVAL;
if (cmd != 0 &&
(cmd < WD_TO_1MS || (cmd - WD_TO_1MS) >= WDT_PRELOAD_BIT)) {
- *error = EINVAL;
return;
}
timeout = 1 << (cmd - WD_TO_1MS);
@@ -148,6 +147,8 @@ i6300esbwd_event(void *arg, unsigned int cmd, int *error)
regval = i6300esbwd_lock_read(sc);
sc->locked = regval & WDT_LOCK;
}
+ /* Set error to 0 to indicate we did something. */
+ *error = 0;
}
static int
diff --git a/sys/dev/iicbus/iic.c b/sys/dev/iicbus/iic.c
index efb8569a23c0..c3fcb2dbdaed 100644
--- a/sys/dev/iicbus/iic.c
+++ b/sys/dev/iicbus/iic.c
@@ -31,11 +31,13 @@
#include <sys/abi_compat.h>
#include <sys/bus.h>
#include <sys/conf.h>
+#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
+#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/systm.h>
#include <sys/uio.h>
@@ -96,7 +98,10 @@ static void iic_identify(driver_t *driver, device_t parent);
static void iicdtor(void *data);
static int iicuio_move(struct iic_cdevpriv *priv, struct uio *uio, int last);
static int iicuio(struct cdev *dev, struct uio *uio, int ioflag);
-static int iicrdwr(struct iic_cdevpriv *priv, struct iic_rdwr_data *d, int flags, bool compat32);
+static int iicrdwr(struct iic_cdevpriv *priv, struct iic_rdwr_data *d,
+ int flags, bool compat32, bool kernel_msgs);
+static int iic_linux_rdwr(struct file *fp, struct iic_rdwr_data *d,
+ int flags, struct thread *td);
static device_method_t iic_methods[] = {
/* device interface */
@@ -163,6 +168,7 @@ iic_attach(device_t dev)
return (ENXIO);
}
sc->sc_devnode->si_drv1 = sc;
+ sc->sc_devnode->si_drv2 = (void *)iic_linux_rdwr;
return (0);
}
@@ -341,7 +347,7 @@ iic_copyinmsgs32(struct iic_rdwr_data *d, struct iic_msg *buf)
static int
iicrdwr(struct iic_cdevpriv *priv, struct iic_rdwr_data *d, int flags,
- bool compat32 __unused)
+ bool compat32 __unused, bool kernel_msgs)
{
#ifdef COMPAT_FREEBSD32
struct iic_rdwr_data dswab;
@@ -375,7 +381,11 @@ iicrdwr(struct iic_cdevpriv *priv, struct iic_rdwr_data *d, int flags,
error = iic_copyinmsgs32(d, buf);
else
#endif
- error = copyin(d->msgs, buf, sizeof(*d->msgs) * d->nmsgs);
+ if (kernel_msgs)
+ memcpy(buf, d->msgs, sizeof(*d->msgs) * d->nmsgs);
+ else
+ error = copyin(d->msgs, buf,
+ sizeof(*d->msgs) * d->nmsgs);
if (error != 0) {
free(buf, M_IIC);
return (error);
@@ -425,6 +435,27 @@ iicrdwr(struct iic_cdevpriv *priv, struct iic_rdwr_data *d, int flags,
}
static int
+iic_linux_rdwr(struct file *fp, struct iic_rdwr_data *d, int flags,
+ struct thread *td)
+{
+ struct file *saved_fp;
+ struct iic_cdevpriv *priv;
+ int error;
+
+ saved_fp = td->td_fpop;
+ td->td_fpop = fp;
+ error = devfs_get_cdevpriv((void **)&priv);
+ td->td_fpop = saved_fp;
+ if (error != 0)
+ return (error);
+
+ IIC_LOCK(priv);
+ error = iicrdwr(priv, d, flags, false, true);
+ IIC_UNLOCK(priv);
+ return (error);
+}
+
+static int
iicioctl(struct cdev *dev, u_long cmd, caddr_t data, int flags, struct thread *td)
{
#ifdef COMPAT_FREEBSD32
@@ -582,7 +613,7 @@ iicioctl(struct cdev *dev, u_long cmd, caddr_t data, int flags, struct thread *t
compat32 = false;
#endif
error = iicrdwr(priv, (struct iic_rdwr_data *)data, flags,
- compat32);
+ compat32, false);
break;
diff --git a/sys/dev/iicbus/iic.h b/sys/dev/iicbus/iic.h
index f6e9360186e0..badfb76e92eb 100644
--- a/sys/dev/iicbus/iic.h
+++ b/sys/dev/iicbus/iic.h
@@ -31,6 +31,14 @@
#include <sys/ioccom.h>
+#ifdef _KERNEL
+struct file;
+struct iic_rdwr_data;
+struct thread;
+typedef int iic_linux_rdwr_t(struct file *fp, struct iic_rdwr_data *d,
+ int flags, struct thread *td);
+#endif
+
/* Designed to be compatible with linux's struct i2c_msg */
struct iic_msg
{
diff --git a/sys/dev/irdma/fbsd_kcompat.c b/sys/dev/irdma/fbsd_kcompat.c
index 32a9bdb4f969..4888c4ddd5ad 100644
--- a/sys/dev/irdma/fbsd_kcompat.c
+++ b/sys/dev/irdma/fbsd_kcompat.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2021 - 2023 Intel Corporation
+ * Copyright (c) 2021 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -36,6 +36,7 @@
#include "ice_rdma.h"
#include "irdma_di_if.h"
#include "irdma_main.h"
+#include "icrdma_hw.h"
#include <sys/gsb_crc32.h>
#include <netinet/in_fib.h>
#include <netinet6/in6_fib.h>
@@ -44,6 +45,11 @@
/* additional QP debuging option. Keep false unless needed */
bool irdma_upload_context = false;
+u8 irdma_sysctl_max_ord = ICRDMA_MAX_ORD_SIZE;
+u8 irdma_sysctl_max_ird = ICRDMA_MAX_IRD_SIZE;
+u8 irdma_rdpu_bw_tun = 0;
+
+static void irdma_modify_rdpu_bw(struct irdma_pci_f *rf);
inline u32
irdma_rd32(struct irdma_dev_ctx *dev_ctx, u32 reg){
@@ -566,6 +572,9 @@ irdma_set_rf_user_cfg_params(struct irdma_pci_f *rf)
rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
/* Enable DCQCN algorithm by default */
rf->dcqcn_ena = true;
+
+ if (irdma_fw_major_ver(&rf->sc_dev) == 2 && irdma_rdpu_bw_tun)
+ irdma_modify_rdpu_bw(rf);
}
/**
@@ -593,12 +602,87 @@ irdma_sysctl_dcqcn_update(SYSCTL_HANDLER_ARGS)
return 0;
}
+static void
+irdma_modify_rdpu_bw(struct irdma_pci_f *rf)
+{
+ u32 val;
+#define GL_RDPU_CNTRL 0x00052054
+
+ val = rd32(&rf->hw, GL_RDPU_CNTRL);
+ printf("pf%d Read GL_RDPU_CNTRL[%x] = 0x%08X",
+ if_getdunit(rf->peer_info->ifp), GL_RDPU_CNTRL, val);
+
+ /* Clear the load balancing bit */
+ val &= ~(0x1 << 2);
+ wr32(&rf->hw, GL_RDPU_CNTRL, val);
+ val = rd32(&rf->hw, GL_RDPU_CNTRL);
+ printf("pf%d Set GL_RDPU_CNTRL[%x] = 0x%08X",
+ if_getdunit(rf->peer_info->ifp), GL_RDPU_CNTRL, val);
+}
+
+enum irdma_qos_info {
+ IRDMA_QOS_DSCP_MAP = 1,
+ IRDMA_QOS_DSCP_MODE,
+ IRDMA_QOS_PRIO_TYPE,
+ IRDMA_QOS_QS_HANDLE,
+ IRDMA_QOS_REL_BW,
+ IRDMA_QOS_TC,
+ IRDMA_QOS_UP2TC
+};
+
enum irdma_cqp_stats_info {
IRDMA_CQP_REQ_CMDS = 28,
IRDMA_CQP_CMPL_CMDS = 29
};
static int
+irdma_sysctl_qos(SYSCTL_HANDLER_ARGS)
+{
+ struct irdma_sc_vsi *vsi = (struct irdma_sc_vsi *)arg1;
+ char rslt[192] = "no vsi available yet";
+ int rslt_size = sizeof(rslt) - 1;
+ int option = (int)arg2;
+ int a;
+
+ if (!vsi) {
+ return sysctl_handle_string(oidp, rslt, sizeof(rslt), req);
+
+ }
+
+ snprintf(rslt, sizeof(rslt), "");
+ switch (option) {
+ case IRDMA_QOS_PRIO_TYPE:
+ for (a = 0; a < IRDMA_MAX_USER_PRIORITY; a++)
+ snprintf(rslt, rslt_size, "%s %02x", rslt, vsi->qos[a].prio_type);
+ break;
+ case IRDMA_QOS_REL_BW:
+ for (a = 0; a < IRDMA_MAX_USER_PRIORITY; a++)
+ snprintf(rslt, rslt_size, "%s %d", rslt, vsi->qos[a].rel_bw);
+ break;
+ case IRDMA_QOS_QS_HANDLE:
+ for (a = 0; a < IRDMA_MAX_USER_PRIORITY; a++)
+ snprintf(rslt, rslt_size, "%s %d", rslt, vsi->qos[a].qs_handle);
+ break;
+ case IRDMA_QOS_TC:
+ for (a = 0; a < IRDMA_MAX_USER_PRIORITY; a++)
+ snprintf(rslt, rslt_size, "%s %d", rslt, vsi->qos[a].traffic_class);
+ break;
+ case IRDMA_QOS_UP2TC:
+ for (a = 0; a < IRDMA_MAX_USER_PRIORITY; a++)
+ snprintf(rslt, rslt_size, "%s %d", rslt, vsi->cfg_check[a].traffic_class);
+ break;
+ case IRDMA_QOS_DSCP_MAP:
+ for (a = 0; a < sizeof(vsi->dscp_map); a++)
+ snprintf(rslt, rslt_size, "%s%02x", rslt, vsi->dscp_map[a]);
+ break;
+ case IRDMA_QOS_DSCP_MODE:
+ snprintf(rslt, rslt_size, "%d", vsi->dscp_mode);
+ }
+
+ return sysctl_handle_string(oidp, rslt, sizeof(rslt), req);
+}
+
+static int
irdma_sysctl_cqp_stats(SYSCTL_HANDLER_ARGS)
{
struct irdma_sc_cqp *cqp = (struct irdma_sc_cqp *)arg1;
@@ -854,6 +938,7 @@ void
irdma_sysctl_settings(struct irdma_pci_f *rf)
{
struct sysctl_oid_list *irdma_sysctl_oid_list;
+ u8 ird_ord_limit;
irdma_sysctl_oid_list = SYSCTL_CHILDREN(rf->tun_info.irdma_sysctl_tree);
@@ -861,6 +946,79 @@ irdma_sysctl_settings(struct irdma_pci_f *rf)
OID_AUTO, "upload_context", CTLFLAG_RWTUN,
&irdma_upload_context, 0,
"allow for generating QP's upload context, default=0");
+
+ if (rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
+ return;
+
+#define ICRDMA_HW_IRD_ORD_LIMIT 128
+ SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
+ OID_AUTO, "ord_max_value", CTLFLAG_RDTUN,
+ &irdma_sysctl_max_ord, ICRDMA_MAX_ORD_SIZE,
+ "Limit Outbound RDMA Read Queue Depth, dflt=32, max=128");
+
+ SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
+ OID_AUTO, "ird_max_value", CTLFLAG_RDTUN,
+ &irdma_sysctl_max_ird, ICRDMA_MAX_IRD_SIZE,
+ "Limit Inbound RDMA Read Queue Depth, dflt=32, max=128");
+ /*
+ * Ensure the ird/ord is equal and not more than ICRDMA_HW_IRD_ORD_LIMIT
+ */
+ ird_ord_limit = min(irdma_sysctl_max_ord, irdma_sysctl_max_ird);
+ if (ird_ord_limit > ICRDMA_HW_IRD_ORD_LIMIT)
+ ird_ord_limit = ICRDMA_HW_IRD_ORD_LIMIT;
+ irdma_sysctl_max_ird = ird_ord_limit;
+ irdma_sysctl_max_ord = ird_ord_limit;
+
+ SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
+ OID_AUTO, "mod_rdpu_bw", CTLFLAG_RDTUN,
+ &irdma_rdpu_bw_tun, 0,
+ "Turn off RDPU BW balance, default=0");
+}
+
+/**
+ * irdma_qos_info_tunables_init - init tunables to read qos settings
+ * @rf: RDMA PCI function
+ */
+void
+irdma_qos_info_tunables_init(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_vsi *vsi = &rf->iwdev->vsi;
+ struct sysctl_oid_list *qos_oid_list;
+
+ qos_oid_list = SYSCTL_CHILDREN(rf->tun_info.qos_sysctl_tree);
+ SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, qos_oid_list,
+ OID_AUTO, "vsi_rel_bw", CTLFLAG_RD,
+ &vsi->qos_rel_bw, 0,
+ "qos_rel_bw");
+ SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, qos_oid_list,
+ OID_AUTO, "vsi_prio_type", CTLFLAG_RD,
+ &vsi->qos_prio_type, 0, "vsi prio type");
+ SYSCTL_ADD_PROC(&rf->tun_info.irdma_sysctl_ctx, qos_oid_list,
+ OID_AUTO, "dscp_mode", CTLFLAG_RD | CTLTYPE_STRING,
+ vsi, IRDMA_QOS_DSCP_MODE, irdma_sysctl_qos, "A",
+ "show dscp_mode");
+ SYSCTL_ADD_PROC(&rf->tun_info.irdma_sysctl_ctx, qos_oid_list, OID_AUTO,
+ "dscp_map", CTLFLAG_RD | CTLTYPE_STRING, vsi,
+ IRDMA_QOS_DSCP_MAP, irdma_sysctl_qos, "A",
+ "show dscp map");
+ SYSCTL_ADD_PROC(&rf->tun_info.irdma_sysctl_ctx, qos_oid_list, OID_AUTO,
+ "up2tc", CTLFLAG_RD | CTLTYPE_STRING, vsi,
+ IRDMA_QOS_UP2TC, irdma_sysctl_qos, "A",
+ "up to tc mapping");
+ SYSCTL_ADD_PROC(&rf->tun_info.irdma_sysctl_ctx, qos_oid_list, OID_AUTO,
+ "qs", CTLFLAG_RD | CTLTYPE_STRING, vsi,
+ IRDMA_QOS_QS_HANDLE, irdma_sysctl_qos, "A",
+ "qs_handle");
+ SYSCTL_ADD_PROC(&rf->tun_info.irdma_sysctl_ctx, qos_oid_list, OID_AUTO,
+ "tc", CTLFLAG_RD | CTLTYPE_STRING, vsi, IRDMA_QOS_TC,
+ irdma_sysctl_qos, "A", "tc list");
+ SYSCTL_ADD_PROC(&rf->tun_info.irdma_sysctl_ctx, qos_oid_list, OID_AUTO,
+ "rel_bw", CTLFLAG_RD | CTLTYPE_STRING, vsi,
+ IRDMA_QOS_REL_BW, irdma_sysctl_qos, "A", "relative bw");
+ SYSCTL_ADD_PROC(&rf->tun_info.irdma_sysctl_ctx, qos_oid_list, OID_AUTO,
+ "prio_type", CTLFLAG_RD | CTLTYPE_STRING, vsi,
+ IRDMA_QOS_PRIO_TYPE, irdma_sysctl_qos, "A",
+ "prio_type");
}
void
diff --git a/sys/dev/irdma/fbsd_kcompat.h b/sys/dev/irdma/fbsd_kcompat.h
index 064963bb93bd..cc51f1e7933a 100644
--- a/sys/dev/irdma/fbsd_kcompat.h
+++ b/sys/dev/irdma/fbsd_kcompat.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2021 - 2023 Intel Corporation
+ * Copyright (c) 2021 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -35,6 +35,7 @@
#ifndef FBSD_KCOMPAT_H
#define FBSD_KCOMPAT_H
#include "ice_rdma.h"
+#include "irdma-abi.h"
#define TASKLET_DATA_TYPE unsigned long
#define TASKLET_FUNC_TYPE void (*)(TASKLET_DATA_TYPE)
@@ -77,9 +78,13 @@
void kc_set_roce_uverbs_cmd_mask(struct irdma_device *iwdev);
void kc_set_rdma_uverbs_cmd_mask(struct irdma_device *iwdev);
+extern u8 irdma_sysctl_max_ord;
+extern u8 irdma_sysctl_max_ird;
+
struct irdma_tunable_info {
struct sysctl_ctx_list irdma_sysctl_ctx;
struct sysctl_oid *irdma_sysctl_tree;
+ struct sysctl_oid *qos_sysctl_tree;
struct sysctl_oid *sws_sysctl_tree;
char drv_ver[IRDMA_VER_LEN];
u8 roce_ena;
@@ -142,7 +147,7 @@ void irdma_destroy_ah(struct ib_ah *ibah, u32 flags);
void irdma_destroy_ah_stub(struct ib_ah *ibah, u32 flags);
int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
-int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u8 *speed, u8 *width);
+int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width);
enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
u8 port_num);
int irdma_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
@@ -190,6 +195,7 @@ int irdma_addr_resolve_neigh_ipv6(struct irdma_cm_node *cm_node, u32 *dest,
int arpindex);
void irdma_dcqcn_tunables_init(struct irdma_pci_f *rf);
void irdma_sysctl_settings(struct irdma_pci_f *rf);
+void irdma_qos_info_tunables_init(struct irdma_pci_f *rf);
void irdma_sw_stats_tunables_init(struct irdma_pci_f *rf);
u32 irdma_create_stag(struct irdma_device *iwdev);
void irdma_free_stag(struct irdma_device *iwdev, u32 stag);
@@ -201,6 +207,15 @@ int irdma_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 len,
struct irdma_mr;
struct irdma_cq;
struct irdma_cq_buf;
+int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
+ struct ib_udata *udata,
+ struct irdma_mr *iwmr);
+int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
+ struct ib_udata *udata,
+ struct irdma_mr *iwmr);
+struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
+ u64 virt, int access,
+ struct ib_udata *udata);
struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg, struct ib_udata *udata);
int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
diff --git a/sys/dev/irdma/ice_devids.h b/sys/dev/irdma/ice_devids.h
index 57a7f2f7c2af..0cf7aa6aee22 100644
--- a/sys/dev/irdma/ice_devids.h
+++ b/sys/dev/irdma/ice_devids.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2019 - 2020 Intel Corporation
+ * Copyright (c) 2019 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -88,4 +88,28 @@
#define ICE_DEV_ID_E822L_10G_BASE_T 0x1899
/* Intel(R) Ethernet Connection E822-L 1GbE */
#define ICE_DEV_ID_E822L_SGMII 0x189A
+/* Intel(R) Ethernet Controller E830-CC for backplane */
+#define ICE_DEV_ID_E830_BACKPLANE 0x12D1
+/* Intel(R) Ethernet Controller E830-CC for QSFP */
+#define ICE_DEV_ID_E830_QSFP56 0x12D2
+/* Intel(R) Ethernet Controller E830-CC for SFP */
+#define ICE_DEV_ID_E830_SFP 0x12D3
+/* Intel(R) Ethernet Controller E830-CC for SFP-DD */
+#define ICE_DEV_ID_E830_SFP_DD 0x12D4
+/* Intel(R) Ethernet Controller E830-C for backplane */
+#define ICE_DEV_ID_E830C_BACKPLANE 0x12D5
+/* Intel(R) Ethernet Controller E830-XXV for backplane */
+#define ICE_DEV_ID_E830_XXV_BACKPLANE 0x12DC
+/* Intel(R) Ethernet Controller E830-C for QSFP */
+#define ICE_DEV_ID_E830C_QSFP 0x12D8
+/* Intel(R) Ethernet Controller E830-XXV for QSFP */
+#define ICE_DEV_ID_E830_XXV_QSFP 0x12DD
+/* Intel(R) Ethernet Controller E830-C for SFP */
+#define ICE_DEV_ID_E830C_SFP 0x12DA
+/* Intel(R) Ethernet Controller E830-XXV for SFP */
+#define ICE_DEV_ID_E830_XXV_SFP 0x12DE
+/* Intel(R) Ethernet Controller E835-XXV for SFP */
+#define ICE_DEV_ID_E835_XXV_SFP 0x124A
+/* Intel(R) Ethernet Controller E835-CC for QSFP */
+#define ICE_DEV_ID_E835_QSFP 0x1249
#endif /* ICE_DEVIDS_H */
diff --git a/sys/dev/irdma/icrdma.c b/sys/dev/irdma/icrdma.c
index a4f3904a820c..aeb07addcff7 100644
--- a/sys/dev/irdma/icrdma.c
+++ b/sys/dev/irdma/icrdma.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2021 - 2025 Intel Corporation
+ * Copyright (c) 2021 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -52,7 +52,7 @@
/**
* Driver version
*/
-char irdma_driver_version[] = "1.2.37-k";
+char irdma_driver_version[] = "1.3.56-k";
/**
* irdma_init_tunable - prepare tunables
@@ -76,6 +76,10 @@ irdma_init_tunable(struct irdma_pci_f *rf, uint8_t pf_id)
irdma_oid_list = SYSCTL_CHILDREN(t_info->irdma_sysctl_tree);
+ t_info->qos_sysctl_tree = SYSCTL_ADD_NODE(&t_info->irdma_sysctl_ctx,
+ irdma_oid_list, OID_AUTO,
+ "qos", CTLFLAG_RD,
+ NULL, "");
t_info->sws_sysctl_tree = SYSCTL_ADD_NODE(&t_info->irdma_sysctl_ctx,
irdma_oid_list, OID_AUTO,
"sw_stats", CTLFLAG_RD,
@@ -418,6 +422,10 @@ irdma_finalize_task(void *context, int pending)
"Starting deferred closing %d (%d)\n",
rf->peer_info->pf_id, if_getdunit(peer->ifp));
atomic_dec(&rf->dev_ctx.event_rfcnt);
+ if (rf->rdma_ver == IRDMA_GEN_2 && !rf->ftype) {
+ cancel_delayed_work_sync(&iwdev->rf->dwork_cqp_poll);
+ irdma_free_stag(iwdev->rf->iwdev, iwdev->rf->chk_stag);
+ }
wait_event_timeout(iwdev->suspend_wq,
!atomic_read(&rf->dev_ctx.event_rfcnt),
IRDMA_MAX_TIMEOUT);
@@ -441,7 +449,10 @@ irdma_finalize_task(void *context, int pending)
if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;
- l2params.mtu = peer->mtu;
+#define IRDMA_MIN_MTU_HEADERS (IB_GRH_BYTES + IB_BTH_BYTES + 28)
+ l2params.mtu = (peer->mtu) ? peer->mtu :
+ ib_mtu_enum_to_int(IB_MTU_256) +
+ IRDMA_MIN_MTU_HEADERS;
status = irdma_rt_init_hw(iwdev, &l2params);
if (status) {
irdma_pr_err("RT init failed %d\n", status);
@@ -454,12 +465,21 @@ irdma_finalize_task(void *context, int pending)
irdma_rt_deinit_hw(iwdev);
ib_dealloc_device(&iwdev->ibdev);
}
+ irdma_qos_info_tunables_init(rf);
irdma_sw_stats_tunables_init(rf);
req.type = ICE_RDMA_EVENT_VSI_FILTER_UPDATE;
req.enable_filter = true;
IRDMA_DI_REQ_HANDLER(peer, &req);
irdma_reg_ipaddr_event_cb(rf);
atomic_inc(&rf->dev_ctx.event_rfcnt);
+ if (rf->rdma_ver == IRDMA_GEN_2 && !rf->ftype) {
+ INIT_DELAYED_WORK(&rf->dwork_cqp_poll, cqp_poll_worker);
+ rf->chk_stag = irdma_create_stag(rf->iwdev);
+ rf->used_mrs++;
+ mod_delayed_work(iwdev->cleanup_wq, &rf->dwork_cqp_poll,
+ msecs_to_jiffies(5000));
+ }
+
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT,
"Deferred opening finished %d (%d)\n",
rf->peer_info->pf_id, if_getdunit(peer->ifp));
@@ -562,8 +582,9 @@ irdma_probe(struct ice_rdma_peer *peer)
struct irdma_handler *hdl;
int err = 0;
- irdma_pr_info("probe: irdma-%s peer=%p, peer->pf_id=%d, peer->ifp=%p, peer->ifp->if_dunit=%d, peer->pci_mem->r_bustag=%p\n",
- irdma_driver_version, peer, peer->pf_id, peer->ifp,
+ irdma_pr_info("probe: irdma-%s peer=%p, peer->pf_id=%d, peer->ifp=%p\n",
+ irdma_driver_version, peer, peer->pf_id, peer->ifp);
+ irdma_pr_info("peer->ifp->if_dunit=%d, peer->pci_mem->r_bustag=%p\n",
if_getdunit(peer->ifp), (void *)(uintptr_t)peer->pci_mem->r_bustag);
hdl = irdma_find_handler(peer);
@@ -664,6 +685,7 @@ irdma_remove(struct ice_rdma_peer *peer)
sysctl_ctx_free(&iwdev->rf->tun_info.irdma_sysctl_ctx);
hdl->iwdev->rf->tun_info.irdma_sysctl_tree = NULL;
+ hdl->iwdev->rf->tun_info.qos_sysctl_tree = NULL;
hdl->iwdev->rf->tun_info.sws_sysctl_tree = NULL;
irdma_ctrl_deinit_hw(iwdev->rf);
diff --git a/sys/dev/irdma/icrdma_hw.c b/sys/dev/irdma/icrdma_hw.c
index a046bf18a616..b3ac9ea0f3de 100644
--- a/sys/dev/irdma/icrdma_hw.c
+++ b/sys/dev/irdma/icrdma_hw.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2017 - 2023 Intel Corporation
+ * Copyright (c) 2017 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -35,6 +35,7 @@
#include "osdep.h"
#include "irdma_type.h"
#include "icrdma_hw.h"
+#include "irdma_main.h"
void disable_prefetch(struct irdma_hw *hw);
@@ -244,11 +245,12 @@ icrdma_init_hw(struct irdma_sc_dev *dev)
}
void
-irdma_init_config_check(struct irdma_config_check *cc, u8 traffic_class, u16 qs_handle)
+irdma_init_config_check(struct irdma_config_check *cc, u8 traffic_class, u8 prio, u16 qs_handle)
{
cc->config_ok = false;
cc->traffic_class = traffic_class;
cc->qs_handle = qs_handle;
+ cc->prio = prio;
cc->lfc_set = 0;
cc->pfc_set = 0;
}
@@ -256,16 +258,27 @@ irdma_init_config_check(struct irdma_config_check *cc, u8 traffic_class, u16 qs_
static bool
irdma_is_lfc_set(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi)
{
+ u32 temp;
u32 lfc = 1;
+ u32 rx_pause_enable, tx_pause_enable;
u8 fn_id = vsi->dev->hmc_fn_id;
- lfc &= (rd32(vsi->dev->hw,
- PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0 + 4 * fn_id) >> 8);
- lfc &= (rd32(vsi->dev->hw,
- PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0 + 4 * fn_id) >> 8);
+ if (irdma_fw_major_ver(vsi->dev) == 1) {
+ rx_pause_enable = PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0;
+ tx_pause_enable = PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0;
+ } else {
+ rx_pause_enable = CNV_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0;
+ tx_pause_enable = CNV_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0;
+ }
+
+#define LFC_ENABLE BIT_ULL(8)
+#define LFC_ENABLE_S 8
+ temp = rd32(vsi->dev->hw, rx_pause_enable + 4 * fn_id);
+ lfc &= FIELD_GET(LFC_ENABLE, temp);
+ temp = rd32(vsi->dev->hw, tx_pause_enable + 4 * fn_id);
+ lfc &= FIELD_GET(LFC_ENABLE, temp);
lfc &= rd32(vsi->dev->hw,
PRTMAC_HSEC_CTL_RX_ENABLE_GPP_0 + 4 * vsi->dev->hmc_fn_id);
-
if (lfc)
return true;
return false;
@@ -290,14 +303,21 @@ static bool
irdma_is_pfc_set(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi)
{
u32 pause;
+ u32 rx_pause_enable, tx_pause_enable;
u8 fn_id = vsi->dev->hmc_fn_id;
- pause = (rd32(vsi->dev->hw,
- PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0 + 4 * fn_id) >>
- cc->traffic_class) & BIT(0);
- pause &= (rd32(vsi->dev->hw,
- PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0 + 4 * fn_id) >>
- cc->traffic_class) & BIT(0);
+ if (irdma_fw_major_ver(vsi->dev) == 1) {
+ rx_pause_enable = PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0;
+ tx_pause_enable = PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0;
+ } else {
+ rx_pause_enable = CNV_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0;
+ tx_pause_enable = CNV_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0;
+ }
+
+ pause = (rd32(vsi->dev->hw, rx_pause_enable + 4 * fn_id) >>
+ cc->prio) & BIT(0);
+ pause &= (rd32(vsi->dev->hw, tx_pause_enable + 4 * fn_id) >>
+ cc->prio) & BIT(0);
return irdma_check_tc_has_pfc(vsi, GLDCB_TC2PFC, cc->traffic_class) &&
pause;
@@ -314,17 +334,18 @@ irdma_is_config_ok(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi)
return cc->config_ok;
}
-#define IRDMA_RCV_WND_NO_FC 65536
-#define IRDMA_RCV_WND_FC 65536
+#define IRDMA_RCV_WND_NO_FC 0x1FFFC
+#define IRDMA_RCV_WND_FC 0x3FFFC
-#define IRDMA_CWND_NO_FC 0x1
-#define IRDMA_CWND_FC 0x18
+#define IRDMA_CWND_NO_FC 0x20
+#define IRDMA_CWND_FC 0x400
+#define IRDMA_CWND_DCQCN_FC 0x80000
#define IRDMA_RTOMIN_NO_FC 0x5
#define IRDMA_RTOMIN_FC 0x32
#define IRDMA_ACKCREDS_NO_FC 0x02
-#define IRDMA_ACKCREDS_FC 0x06
+#define IRDMA_ACKCREDS_FC 0x1E
static void
irdma_check_flow_ctrl(struct irdma_sc_vsi *vsi, u8 user_prio, u8 traffic_class)
@@ -372,7 +393,7 @@ irdma_check_fc_for_qp(struct irdma_sc_vsi *vsi, struct irdma_sc_qp *sc_qp)
struct irdma_config_check *cfg_chk = &vsi->cfg_check[i];
irdma_init_config_check(cfg_chk,
- vsi->qos[i].traffic_class,
+ vsi->qos[i].traffic_class, i,
vsi->qos[i].qs_handle);
if (sc_qp->qs_handle == cfg_chk->qs_handle)
irdma_check_flow_ctrl(vsi, i, cfg_chk->traffic_class);
diff --git a/sys/dev/irdma/icrdma_hw.h b/sys/dev/irdma/icrdma_hw.h
index b413b478538a..0b48c69ba4ee 100644
--- a/sys/dev/irdma/icrdma_hw.h
+++ b/sys/dev/irdma/icrdma_hw.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2017 - 2023 Intel Corporation
+ * Copyright (c) 2017 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -83,6 +83,16 @@
#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_2 0x001e31a8
#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_3 0x001e31aC
+#define CNV_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0 0x001e2180
+#define CNV_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_1 0x001e2184
+#define CNV_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_2 0x001e2188
+#define CNV_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_3 0x001e218c
+
+#define CNV_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0 0x001e21a0
+#define CNV_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_1 0x001e21a4
+#define CNV_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_2 0x001e21a8
+#define CNV_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_3 0x001e21ac
+
#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_0 0x001e34c0
#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_1 0x001e34c4
#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_2 0x001e34c8
@@ -120,8 +130,8 @@ enum icrdma_device_caps_const {
ICRDMA_MAX_SGE_RD = 13,
ICRDMA_MAX_STATS_COUNT = 128,
- ICRDMA_MAX_IRD_SIZE = 32,
- ICRDMA_MAX_ORD_SIZE = 32,
+ ICRDMA_MAX_IRD_SIZE = 8,
+ ICRDMA_MAX_ORD_SIZE = 8,
ICRDMA_MIN_WQ_SIZE = 8 /* WQEs */,
ICRDMA_MAX_PUSH_PAGE_COUNT = 256,
@@ -130,6 +140,7 @@ enum icrdma_device_caps_const {
void icrdma_init_hw(struct irdma_sc_dev *dev);
void irdma_init_config_check(struct irdma_config_check *cc,
u8 traffic_class,
+ u8 prio,
u16 qs_handle);
bool irdma_is_config_ok(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi);
void irdma_check_fc_for_tc_update(struct irdma_sc_vsi *vsi,
diff --git a/sys/dev/irdma/irdma.h b/sys/dev/irdma/irdma.h
index e6e493f1854a..b745626b8b60 100644
--- a/sys/dev/irdma/irdma.h
+++ b/sys/dev/irdma/irdma.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2017 - 2022 Intel Corporation
+ * Copyright (c) 2017 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -99,6 +99,7 @@
#define IRDMA_PFHMC_SDCMD_PMSDPARTSEL BIT(15)
#define IRDMA_INVALID_CQ_IDX 0xffffffff
+#define IRDMA_Q_INVALID_IDX 0xffff
enum irdma_dyn_idx_t {
IRDMA_IDX_ITR0 = 0,
@@ -195,6 +196,7 @@ struct irdma_uk_attrs {
u32 max_hw_wq_quanta;
u32 min_hw_cq_size;
u32 max_hw_cq_size;
+ u16 max_hw_push_len;
u16 max_hw_sq_chunk;
u16 min_hw_wq_size;
u8 hw_rev;
diff --git a/sys/dev/irdma/irdma_cm.c b/sys/dev/irdma/irdma_cm.c
index f3ca761b32f6..669d2cf5cb10 100644
--- a/sys/dev/irdma/irdma_cm.c
+++ b/sys/dev/irdma/irdma_cm.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2025 Intel Corporation
+ * Copyright (c) 2015 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -38,18 +38,110 @@ static void irdma_cm_post_event(struct irdma_cm_event *event);
static void irdma_disconnect_worker(struct work_struct *work);
/**
- * irdma_free_sqbuf - put back puda buffer if refcount is 0
+ * irdma_cm_node_cmp_state - Compare the state of a CM node
+ * @cm_node: Pointer to the CM node structure
+ * @state: The state to compare against
+ *
+ * This function checks if the current state of the given CM node matches
+ * the specified state.
+ *
+ * Return: true if the states match, false otherwise.
+ */
+static bool
+irdma_cm_node_cmp_state(struct irdma_cm_node *cm_node,
+ enum irdma_cm_node_state state)
+{
+
+ return cm_node->state == state;
+}
+
+/**
+ * irdma_cm_node_set_state - Set the state of a CM node
+ * @cm_node: Pointer to the CM node whose state is to be updated
+ * @state: The new state to set for the CM node
+ *
+ * This function updates the state of the specified CM node to the
+ * provided state and returns the previous state of the CM node.
+ *
+ * Return: The previous state of the CM node.
+ */
+static enum irdma_cm_node_state
+irdma_cm_node_set_state(struct irdma_cm_node *cm_node,
+ enum irdma_cm_node_state state)
+{
+ enum irdma_cm_node_state old_state;
+
+ old_state = cm_node->state;
+ cm_node->state = state;
+ return old_state;
+}
+
+/**
+ * irdma_rem_ref_sqbuf - put back puda buffer if refcount is 0
+ * @vsi: The VSI structure of the device
+ * @buf: puda buffer to free
+ */
+static int
+irdma_rem_ref_sqbuf(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *buf)
+{
+ struct irdma_puda_rsrc *ilq = vsi->ilq;
+ struct irdma_cm_node *cm_node = buf->scratch;
+ struct irdma_cm_core *cm_core;
+
+ if (!atomic_dec_and_test(&buf->pb_refcount))
+ return 0;
+
+ irdma_puda_ret_bufpool(ilq, buf);
+
+ if (cm_node) {
+ buf->scratch = NULL;
+ cm_core = cm_node->cm_core;
+ cm_core->cm_free_ah(cm_node);
+ }
+
+ return 1;
+}
+
+/**
+ * irdma_cm_ilq_cmpl_handler - callback function when ILQ completes a send
* @vsi: The VSI structure of the device
- * @bufp: puda buffer to free
+ * @bufp: puda buffer structure from sent packet
*/
void
-irdma_free_sqbuf(struct irdma_sc_vsi *vsi, void *bufp)
+irdma_cm_ilq_cmpl_handler(struct irdma_sc_vsi *vsi, void *bufp)
{
struct irdma_puda_buf *buf = bufp;
- struct irdma_puda_rsrc *ilq = vsi->ilq;
- if (atomic_dec_and_test(&buf->refcount))
- irdma_puda_ret_bufpool(ilq, buf);
+ irdma_rem_ref_sqbuf(vsi, buf);
+}
+
+/**
+ * irdma_cm_send_buf - Sends a buffer using the PUDA ILQ
+ * @ilq: Pointer to the PUDA (Protocol Unit Data Agent) resource structure
+ * @buf: Pointer to the PUDA buffer to be sent
+ *
+ * This function is responsible for transmitting a buffer through the
+ * specified PUDA resource. It is typically used in the context of
+ * managing RDMA connections and their associated data transfers.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int
+irdma_cm_send_buf(
+ struct irdma_puda_rsrc *ilq,
+ struct irdma_puda_buf *buf
+)
+{
+ int ret;
+
+ if (!atomic_inc_not_zero(&buf->pb_refcount))
+ pr_err("irdma: puda buffer refcnt increase from zero\n");
+
+ ret = irdma_puda_send_buf(ilq, buf);
+ if (ret)
+ irdma_rem_ref_sqbuf(ilq->vsi, buf);
+
+ return ret;
}
/**
@@ -255,7 +347,7 @@ irdma_timer_list_prep(struct irdma_cm_core *cm_core,
HASH_FOR_EACH_RCU(cm_core->cm_hash_tbl, bkt, cm_node, list) {
if ((cm_node->close_entry || cm_node->send_entry) &&
- atomic_inc_not_zero(&cm_node->refcnt))
+ irdma_add_ref_cmnode(cm_node))
list_add(&cm_node->timer_entry, timer_list);
}
}
@@ -304,17 +396,16 @@ irdma_create_event(struct irdma_cm_node *cm_node,
static void
irdma_free_retrans_entry(struct irdma_cm_node *cm_node)
{
- struct irdma_device *iwdev = cm_node->iwdev;
struct irdma_timer_entry *send_entry;
send_entry = cm_node->send_entry;
+ cm_node->send_entry = NULL;
if (!send_entry)
return;
- cm_node->send_entry = NULL;
- irdma_free_sqbuf(&iwdev->vsi, send_entry->sqbuf);
+ irdma_rem_ref_sqbuf(&cm_node->iwdev->vsi, send_entry->sqbuf);
kfree(send_entry);
- atomic_dec(&cm_node->refcnt);
+ irdma_rem_ref_cmnode(cm_node);
}
/**
@@ -367,6 +458,7 @@ irdma_form_ah_cm_frame(struct irdma_cm_node *cm_node,
}
sqbuf->ah_id = cm_node->ah->ah_info.ah_idx;
+ sqbuf->ah = cm_node->ah;
buf = sqbuf->mem.va;
if (options)
opts_len = (u32)options->size;
@@ -433,7 +525,7 @@ irdma_form_ah_cm_frame(struct irdma_cm_node *cm_node,
if (pdata && pdata->addr)
memcpy(buf, pdata->addr, pdata->size);
- atomic_set(&sqbuf->refcount, 1);
+ atomic_set(&sqbuf->pb_refcount, 1);
irdma_debug_buf(vsi->dev, IRDMA_DEBUG_ILQ, "TRANSMIT ILQ BUFFER",
sqbuf->mem.va, sqbuf->totallen);
@@ -620,7 +712,7 @@ irdma_form_uda_cm_frame(struct irdma_cm_node *cm_node,
if (pdata && pdata->addr)
memcpy(buf, pdata->addr, pdata->size);
- atomic_set(&sqbuf->refcount, 1);
+ atomic_set(&sqbuf->pb_refcount, 1);
irdma_debug_buf(vsi->dev, IRDMA_DEBUG_ILQ, "TRANSMIT ILQ BUFFER",
sqbuf->mem.va, sqbuf->totallen);
@@ -667,11 +759,12 @@ irdma_active_open_err(struct irdma_cm_node *cm_node, bool reset)
if (reset) {
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"cm_node=%p state=%d\n", cm_node, cm_node->state);
- atomic_inc(&cm_node->refcnt);
- irdma_send_reset(cm_node);
+ irdma_add_ref_cmnode(cm_node);
+ if (irdma_send_reset(cm_node))
+ irdma_rem_ref_cmnode(cm_node);
}
- cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_CLOSED);
irdma_create_event(cm_node, IRDMA_CM_EVENT_ABORTED);
}
@@ -685,13 +778,13 @@ irdma_passive_open_err(struct irdma_cm_node *cm_node, bool reset)
{
irdma_cleanup_retrans_entry(cm_node);
cm_node->cm_core->stats_passive_errs++;
- cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_CLOSED);
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"cm_node=%p state=%d\n", cm_node, cm_node->state);
if (reset)
irdma_send_reset(cm_node);
else
- irdma_rem_ref_cm_node(cm_node);
+ irdma_rem_ref_cmnode(cm_node);
}
/**
@@ -717,7 +810,7 @@ irdma_event_connect_error(struct irdma_cm_event *event)
cm_id->provider_data = NULL;
irdma_send_cm_event(event->cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
-ECONNRESET);
- irdma_rem_ref_cm_node(event->cm_node);
+ irdma_rem_ref_cmnode(event->cm_node);
}
/**
@@ -989,7 +1082,7 @@ irdma_send_mpa_reject(struct irdma_cm_node *cm_node,
if (!sqbuf)
return -ENOMEM;
- cm_node->state = IRDMA_CM_STATE_FIN_WAIT1;
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_FIN_WAIT1);
return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1,
0);
@@ -1028,7 +1121,7 @@ irdma_negotiate_mpa_v2_ird_ord(struct irdma_cm_node *cm_node,
goto negotiate_done;
}
- if (cm_node->state != IRDMA_CM_STATE_MPAREQ_SENT) {
+ if (!irdma_cm_node_cmp_state(cm_node, IRDMA_CM_STATE_MPAREQ_SENT)) {
/* responder */
if (!ord_size && (ctrl_ord & IETF_RDMA0_READ))
cm_node->ird_size = 1;
@@ -1108,7 +1201,7 @@ irdma_parse_mpa(struct irdma_cm_node *cm_node, u8 *buf, u32 *type,
}
cm_node->mpa_frame_rev = mpa_frame->rev;
- if (cm_node->state != IRDMA_CM_STATE_MPAREQ_SENT) {
+ if (!irdma_cm_node_cmp_state(cm_node, IRDMA_CM_STATE_MPAREQ_SENT)) {
if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ,
IETF_MPA_KEY_SIZE)) {
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
@@ -1170,7 +1263,7 @@ irdma_parse_mpa(struct irdma_cm_node *cm_node, u8 *buf, u32 *type,
* @close_when_complete: is cm_node to be removed
*
* note - cm_node needs to be protected before calling this. Encase in:
- * irdma_rem_ref_cm_node(cm_core, cm_node);
+ * irdma_rem_ref_cmnode(cm_node);
* irdma_schedule_cm_timer(...)
* atomic_inc(&cm_node->refcnt);
*/
@@ -1189,7 +1282,7 @@ irdma_schedule_cm_timer(struct irdma_cm_node *cm_node,
new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
if (!new_send) {
if (type != IRDMA_TIMER_TYPE_CLOSE)
- irdma_free_sqbuf(vsi, sqbuf);
+ irdma_rem_ref_sqbuf(vsi, sqbuf);
return -ENOMEM;
}
@@ -1204,6 +1297,7 @@ irdma_schedule_cm_timer(struct irdma_cm_node *cm_node,
if (type == IRDMA_TIMER_TYPE_CLOSE) {
new_send->timetosend += (HZ / 10);
if (cm_node->close_entry) {
+ irdma_rem_ref_sqbuf(vsi, sqbuf);
kfree(new_send);
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"already close entry\n");
@@ -1213,17 +1307,29 @@ irdma_schedule_cm_timer(struct irdma_cm_node *cm_node,
cm_node->close_entry = new_send;
} else { /* type == IRDMA_TIMER_TYPE_SEND */
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ if (cm_node->send_entry) {
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock,
+ flags);
+ irdma_rem_ref_sqbuf(vsi, sqbuf);
+ kfree(new_send);
+
+ return -EINVAL;
+ }
cm_node->send_entry = new_send;
- atomic_inc(&cm_node->refcnt);
+ irdma_add_ref_cmnode(cm_node);
spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
new_send->timetosend = jiffies + IRDMA_RETRY_TIMEOUT;
- atomic_inc(&sqbuf->refcount);
- irdma_puda_send_buf(vsi->ilq, sqbuf);
+ if (sqbuf->ah)
+ atomic_inc(&sqbuf->ah->ah_info.ah_refcnt);
+
+ if (irdma_cm_send_buf(vsi->ilq, new_send->sqbuf))
+ cm_core->cm_free_ah(cm_node);
+
if (!send_retrans) {
irdma_cleanup_retrans_entry(cm_node);
if (close_when_complete)
- irdma_rem_ref_cm_node(cm_node);
+ irdma_rem_ref_cmnode(cm_node);
return 0;
}
}
@@ -1247,21 +1353,22 @@ irdma_schedule_cm_timer(struct irdma_cm_node *cm_node,
static void
irdma_retrans_expired(struct irdma_cm_node *cm_node)
{
- enum irdma_cm_node_state state = cm_node->state;
+ enum irdma_cm_node_state state;
- cm_node->state = IRDMA_CM_STATE_CLOSED;
+ state = irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_CLOSED);
switch (state) {
case IRDMA_CM_STATE_SYN_RCVD:
case IRDMA_CM_STATE_CLOSING:
- irdma_rem_ref_cm_node(cm_node);
+ irdma_rem_ref_cmnode(cm_node);
break;
case IRDMA_CM_STATE_FIN_WAIT1:
case IRDMA_CM_STATE_LAST_ACK:
irdma_send_reset(cm_node);
break;
default:
- atomic_inc(&cm_node->refcnt);
- irdma_send_reset(cm_node);
+ irdma_add_ref_cmnode(cm_node);
+ if (irdma_send_reset(cm_node))
+ irdma_rem_ref_cmnode(cm_node);
irdma_create_event(cm_node, IRDMA_CM_EVENT_ABORTED);
break;
}
@@ -1297,7 +1404,7 @@ irdma_handle_close_entry(struct irdma_cm_node *cm_node,
}
} else if (rem_node) {
/* TIME_WAIT state */
- irdma_rem_ref_cm_node(cm_node);
+ irdma_rem_ref_cmnode(cm_node);
}
kfree(close_entry);
@@ -1352,7 +1459,7 @@ irdma_cm_timer_tick(struct timer_list *t)
if (!send_entry)
goto done;
if (time_after(send_entry->timetosend, jiffies)) {
- if (cm_node->state != IRDMA_CM_STATE_OFFLOADED) {
+ if (!irdma_cm_node_cmp_state(cm_node, IRDMA_CM_STATE_OFFLOADED)) {
if (nexttimeout > send_entry->timetosend ||
!settimer) {
nexttimeout = send_entry->timetosend;
@@ -1364,8 +1471,8 @@ irdma_cm_timer_tick(struct timer_list *t)
goto done;
}
- if (cm_node->state == IRDMA_CM_STATE_OFFLOADED ||
- cm_node->state == IRDMA_CM_STATE_CLOSED) {
+ if (irdma_cm_node_cmp_state(cm_node, IRDMA_CM_STATE_OFFLOADED) ||
+ irdma_cm_node_cmp_state(cm_node, IRDMA_CM_STATE_CLOSED)) {
irdma_free_retrans_entry(cm_node);
goto done;
}
@@ -1376,7 +1483,7 @@ irdma_cm_timer_tick(struct timer_list *t)
spin_unlock_irqrestore(&cm_node->retrans_list_lock,
flags);
irdma_retrans_expired(cm_node);
- cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_CLOSED);
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
goto done;
}
@@ -1384,18 +1491,20 @@ irdma_cm_timer_tick(struct timer_list *t)
vsi = &cm_node->iwdev->vsi;
if (!cm_node->ack_rcvd) {
- atomic_inc(&send_entry->sqbuf->refcount);
- irdma_puda_send_buf(vsi->ilq, send_entry->sqbuf);
+ if (send_entry->sqbuf->ah)
+ atomic_inc(&send_entry->sqbuf->ah->ah_info.ah_refcnt);
+ if (irdma_cm_send_buf(vsi->ilq, send_entry->sqbuf))
+ cm_core->cm_free_ah(cm_node);
+
cm_node->cm_core->stats_pkt_retrans++;
}
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
if (send_entry->send_retrans) {
send_entry->retranscount--;
- timetosend = (IRDMA_RETRY_TIMEOUT <<
- (IRDMA_DEFAULT_RETRANS -
- send_entry->retranscount));
-
+ timetosend = IRDMA_RETRY_TIMEOUT <<
+ min(IRDMA_DEFAULT_RETRANS -
+ send_entry->retranscount, (u32)4);
send_entry->timetosend = jiffies +
min(timetosend, IRDMA_MAX_TIMEOUT);
if (nexttimeout > send_entry->timetosend || !settimer) {
@@ -1408,11 +1517,11 @@ irdma_cm_timer_tick(struct timer_list *t)
close_when_complete = send_entry->close_when_complete;
irdma_free_retrans_entry(cm_node);
if (close_when_complete)
- irdma_rem_ref_cm_node(cm_node);
+ irdma_rem_ref_cmnode(cm_node);
}
done:
spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
- irdma_rem_ref_cm_node(cm_node);
+ irdma_rem_ref_cmnode(cm_node);
}
if (settimer) {
@@ -1489,8 +1598,15 @@ irdma_send_ack(struct irdma_cm_node *cm_node)
sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL, NULL, NULL,
SET_ACK);
- if (sqbuf)
- irdma_puda_send_buf(vsi->ilq, sqbuf);
+ if (sqbuf) {
+ if (sqbuf->ah)
+ atomic_inc(&sqbuf->ah->ah_info.ah_refcnt);
+
+ if (irdma_cm_send_buf(vsi->ilq, sqbuf))
+ cm_node->cm_core->cm_free_ah(cm_node);
+
+ irdma_rem_ref_sqbuf(vsi, sqbuf);
+ }
}
/**
@@ -1665,7 +1781,6 @@ u16
irdma_get_vlan_ipv4(struct iw_cm_id *cm_id, u32 *addr)
{
u16 vlan_id = 0xFFFF;
-
#ifdef INET
if_t netdev;
struct vnet *vnet = &init_net;
@@ -1831,7 +1946,7 @@ irdma_reset_list_prep(struct irdma_cm_core *cm_core,
HASH_FOR_EACH_RCU(cm_core->cm_hash_tbl, bkt, cm_node, list) {
if (cm_node->listener == listener &&
!cm_node->accelerated &&
- atomic_inc_not_zero(&cm_node->refcnt))
+ irdma_add_ref_cmnode(cm_node))
list_add(&cm_node->reset_entry, reset_list);
}
}
@@ -1869,21 +1984,20 @@ irdma_dec_refcnt_listen(struct irdma_cm_core *cm_core,
cm_node = container_of(list_pos, struct irdma_cm_node,
reset_entry);
if (cm_node->state >= IRDMA_CM_STATE_FIN_WAIT1) {
- irdma_rem_ref_cm_node(cm_node);
+ irdma_rem_ref_cmnode(cm_node);
continue;
}
irdma_cleanup_retrans_entry(cm_node);
err = irdma_send_reset(cm_node);
if (err) {
- cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_CLOSED);
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"send reset failed\n");
} else {
- old_state = cm_node->state;
- cm_node->state = IRDMA_CM_STATE_LISTENER_DESTROYED;
+ old_state = irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_LISTENER_DESTROYED);
if (old_state != IRDMA_CM_STATE_MPAREQ_RCVD)
- irdma_rem_ref_cm_node(cm_node);
+ irdma_rem_ref_cmnode(cm_node);
}
}
@@ -1969,7 +2083,7 @@ irdma_find_node(struct irdma_cm_core *cm_core,
cm_node->loc_port == loc_port && cm_node->rem_port == rem_port &&
!memcmp(cm_node->loc_addr, loc_addr, sizeof(cm_node->loc_addr)) &&
!memcmp(cm_node->rem_addr, rem_addr, sizeof(cm_node->rem_addr))) {
- if (!atomic_inc_not_zero(&cm_node->refcnt))
+ if (!irdma_add_ref_cmnode(cm_node))
goto exit;
rcu_read_unlock();
return cm_node;
@@ -2078,10 +2192,24 @@ irdma_cm_create_ah(struct irdma_cm_node *cm_node, bool wait)
&cm_node->ah))
return -ENOMEM;
+ atomic_set(&cm_node->ah->ah_info.ah_refcnt, 1);
+
return 0;
}
/**
+ * irdma_cm_free_ah_worker - async free a cm address handle
+ * @work: pointer to ah structure
+ */
+static void
+irdma_cm_free_ah_worker(struct work_struct *work)
+{
+ struct irdma_sc_ah *ah = container_of(work, struct irdma_sc_ah, ah_free_work);
+
+ irdma_puda_free_ah(ah->dev, ah);
+}
+
+/**
* irdma_cm_free_ah - free a cm address handle
* @cm_node: The connection manager node to create AH for
*/
@@ -2090,8 +2218,14 @@ irdma_cm_free_ah(struct irdma_cm_node *cm_node)
{
struct irdma_device *iwdev = cm_node->iwdev;
- irdma_puda_free_ah(&iwdev->rf->sc_dev, cm_node->ah);
- cm_node->ah = NULL;
+ if (cm_node->ah) {
+ if (!atomic_dec_and_test(&cm_node->ah->ah_info.ah_refcnt))
+ return;
+
+ INIT_WORK(&cm_node->ah->ah_free_work, irdma_cm_free_ah_worker);
+ queue_work(iwdev->cleanup_wq, &cm_node->ah->ah_free_work);
+ cm_node->ah = NULL;
+ }
}
/**
@@ -2109,11 +2243,12 @@ irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev,
struct irdma_cm_node *cm_node;
int arpindex;
if_t netdev = iwdev->netdev;
+ int ret;
/* create an hte and cm_node for this instance */
cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
if (!cm_node)
- return NULL;
+ return ERR_PTR(-ENOMEM);
/* set our node specific transport info */
cm_node->ipv4 = cm_info->ipv4;
@@ -2170,8 +2305,10 @@ irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev,
kc_set_loc_seq_num_mss(cm_node);
arpindex = irdma_resolve_neigh_lpb_chk(iwdev, cm_node, cm_info);
- if (arpindex < 0)
+ if (arpindex < 0) {
+ ret = -EINVAL;
goto err;
+ }
ether_addr_copy(cm_node->rem_mac, iwdev->rf->arp_table[arpindex].mac_addr);
irdma_add_hte_node(cm_core, cm_node);
@@ -2181,7 +2318,7 @@ irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev,
err:
kfree(cm_node);
- return NULL;
+ return ERR_PTR(ret);
}
static void
@@ -2197,6 +2334,9 @@ irdma_destroy_connection(struct irdma_cm_node *cm_node)
"node destroyed before established\n");
atomic_dec(&cm_node->listener->pend_accepts_cnt);
}
+
+ if (cm_node->send_entry)
+ irdma_cleanup_retrans_entry(cm_node);
if (cm_node->close_entry)
irdma_handle_close_entry(cm_node, 0);
if (cm_node->listener) {
@@ -2237,11 +2377,28 @@ irdma_destroy_connection(struct irdma_cm_node *cm_node)
}
/**
- * irdma_rem_ref_cm_node - destroy an instance of a cm node
+ * irdma_add_ref_cmnode - add reference to an instance of a cm node
+ * @cm_node: connection's node
+ */
+bool
+irdma_add_ref_cmnode(struct irdma_cm_node *cm_node)
+{
+ if (atomic_inc_not_zero(&cm_node->refcnt))
+ return true;
+
+ /*
+ * Trying to add refcount to a cmnode being destroyed.
+ */
+
+ return false;
+}
+
+/**
+ * irdma_rem_ref_cmnode - destroy an instance of a cm node
* @cm_node: connection's node
*/
void
-irdma_rem_ref_cm_node(struct irdma_cm_node *cm_node)
+irdma_rem_ref_cmnode(struct irdma_cm_node *cm_node)
{
struct irdma_cm_core *cm_core = cm_node->cm_core;
unsigned long flags;
@@ -2280,21 +2437,23 @@ irdma_handle_fin_pkt(struct irdma_cm_node *cm_node)
case IRDMA_CM_STATE_MPAREJ_RCVD:
cm_node->tcp_cntxt.rcv_nxt++;
irdma_cleanup_retrans_entry(cm_node);
- cm_node->state = IRDMA_CM_STATE_LAST_ACK;
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_LAST_ACK);
irdma_send_fin(cm_node);
break;
case IRDMA_CM_STATE_MPAREQ_SENT:
irdma_create_event(cm_node, IRDMA_CM_EVENT_ABORTED);
cm_node->tcp_cntxt.rcv_nxt++;
irdma_cleanup_retrans_entry(cm_node);
- cm_node->state = IRDMA_CM_STATE_CLOSED;
- atomic_inc(&cm_node->refcnt);
- irdma_send_reset(cm_node);
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_CLOSED);
+ irdma_add_ref_cmnode(cm_node);
+ if (irdma_send_reset(cm_node))
+ irdma_rem_ref_cmnode(cm_node);
+
break;
case IRDMA_CM_STATE_FIN_WAIT1:
cm_node->tcp_cntxt.rcv_nxt++;
irdma_cleanup_retrans_entry(cm_node);
- cm_node->state = IRDMA_CM_STATE_CLOSING;
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_CLOSING);
irdma_send_ack(cm_node);
/*
* Wait for ACK as this is simultaneous close. After we receive ACK, do not send anything. Just rm the
@@ -2304,7 +2463,7 @@ irdma_handle_fin_pkt(struct irdma_cm_node *cm_node)
case IRDMA_CM_STATE_FIN_WAIT2:
cm_node->tcp_cntxt.rcv_nxt++;
irdma_cleanup_retrans_entry(cm_node);
- cm_node->state = IRDMA_CM_STATE_TIME_WAIT;
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_TIME_WAIT);
irdma_send_ack(cm_node);
irdma_schedule_cm_timer(cm_node, NULL, IRDMA_TIMER_TYPE_CLOSE,
1, 0);
@@ -2312,8 +2471,8 @@ irdma_handle_fin_pkt(struct irdma_cm_node *cm_node)
case IRDMA_CM_STATE_TIME_WAIT:
cm_node->tcp_cntxt.rcv_nxt++;
irdma_cleanup_retrans_entry(cm_node);
- cm_node->state = IRDMA_CM_STATE_CLOSED;
- irdma_rem_ref_cm_node(cm_node);
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_CLOSED);
+ irdma_rem_ref_cmnode(cm_node);
break;
case IRDMA_CM_STATE_OFFLOADED:
default:
@@ -2347,7 +2506,7 @@ irdma_handle_rst_pkt(struct irdma_cm_node *cm_node,
/* Drop down to MPA_V1 */
cm_node->mpa_frame_rev = IETF_MPA_V1;
/* send a syn and goto syn sent state */
- cm_node->state = IRDMA_CM_STATE_SYN_SENT;
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_SYN_SENT);
if (irdma_send_syn(cm_node, 0))
irdma_active_open_err(cm_node, false);
break;
@@ -2374,8 +2533,8 @@ irdma_handle_rst_pkt(struct irdma_cm_node *cm_node,
case IRDMA_CM_STATE_FIN_WAIT1:
case IRDMA_CM_STATE_LAST_ACK:
case IRDMA_CM_STATE_TIME_WAIT:
- cm_node->state = IRDMA_CM_STATE_CLOSED;
- irdma_rem_ref_cm_node(cm_node);
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_CLOSED);
+ irdma_rem_ref_cmnode(cm_node);
break;
default:
break;
@@ -2400,7 +2559,7 @@ irdma_handle_rcv_mpa(struct irdma_cm_node *cm_node,
err = irdma_parse_mpa(cm_node, dataloc, &res_type, datasize);
if (err) {
- if (cm_node->state == IRDMA_CM_STATE_MPAREQ_SENT)
+ if (irdma_cm_node_cmp_state(cm_node, IRDMA_CM_STATE_MPAREQ_SENT))
irdma_active_open_err(cm_node, true);
else
irdma_passive_open_err(cm_node, true);
@@ -2412,7 +2571,7 @@ irdma_handle_rcv_mpa(struct irdma_cm_node *cm_node,
if (res_type == IRDMA_MPA_REQUEST_REJECT)
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"state for reject\n");
- cm_node->state = IRDMA_CM_STATE_MPAREQ_RCVD;
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_MPAREQ_RCVD);
type = IRDMA_CM_EVENT_MPA_REQ;
irdma_send_ack(cm_node); /* ACK received MPA request */
atomic_set(&cm_node->passive_state,
@@ -2422,10 +2581,10 @@ irdma_handle_rcv_mpa(struct irdma_cm_node *cm_node,
irdma_cleanup_retrans_entry(cm_node);
if (res_type == IRDMA_MPA_REQUEST_REJECT) {
type = IRDMA_CM_EVENT_MPA_REJECT;
- cm_node->state = IRDMA_CM_STATE_MPAREJ_RCVD;
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_MPAREJ_RCVD);
} else {
type = IRDMA_CM_EVENT_CONNECTED;
- cm_node->state = IRDMA_CM_STATE_OFFLOADED;
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_OFFLOADED);
}
irdma_send_ack(cm_node);
break;
@@ -2542,12 +2701,13 @@ irdma_handle_syn_pkt(struct irdma_cm_node *cm_node,
cm_node->accept_pend = 1;
atomic_inc(&cm_node->listener->pend_accepts_cnt);
- cm_node->state = IRDMA_CM_STATE_SYN_RCVD;
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_SYN_RCVD);
break;
case IRDMA_CM_STATE_CLOSED:
irdma_cleanup_retrans_entry(cm_node);
- atomic_inc(&cm_node->refcnt);
- irdma_send_reset(cm_node);
+ irdma_add_ref_cmnode(cm_node);
+ if (irdma_send_reset(cm_node))
+ irdma_rem_ref_cmnode(cm_node);
break;
case IRDMA_CM_STATE_OFFLOADED:
case IRDMA_CM_STATE_ESTABLISHED:
@@ -2605,7 +2765,7 @@ irdma_handle_synack_pkt(struct irdma_cm_node *cm_node,
cm_node);
break;
}
- cm_node->state = IRDMA_CM_STATE_MPAREQ_SENT;
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_MPAREQ_SENT);
break;
case IRDMA_CM_STATE_MPAREQ_RCVD:
irdma_passive_open_err(cm_node, true);
@@ -2613,14 +2773,15 @@ irdma_handle_synack_pkt(struct irdma_cm_node *cm_node,
case IRDMA_CM_STATE_LISTENING:
cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->th_ack);
irdma_cleanup_retrans_entry(cm_node);
- cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_CLOSED);
irdma_send_reset(cm_node);
break;
case IRDMA_CM_STATE_CLOSED:
cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->th_ack);
irdma_cleanup_retrans_entry(cm_node);
- atomic_inc(&cm_node->refcnt);
- irdma_send_reset(cm_node);
+ irdma_add_ref_cmnode(cm_node);
+ if (irdma_send_reset(cm_node))
+ irdma_rem_ref_cmnode(cm_node);
break;
case IRDMA_CM_STATE_ESTABLISHED:
case IRDMA_CM_STATE_FIN_WAIT1:
@@ -2663,7 +2824,7 @@ irdma_handle_ack_pkt(struct irdma_cm_node *cm_node,
if (ret)
return ret;
cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->th_ack);
- cm_node->state = IRDMA_CM_STATE_ESTABLISHED;
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_ESTABLISHED);
if (datasize) {
cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
irdma_handle_rcv_mpa(cm_node, rbuf);
@@ -2688,23 +2849,24 @@ irdma_handle_ack_pkt(struct irdma_cm_node *cm_node,
break;
case IRDMA_CM_STATE_LISTENING:
irdma_cleanup_retrans_entry(cm_node);
- cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_CLOSED);
irdma_send_reset(cm_node);
break;
case IRDMA_CM_STATE_CLOSED:
irdma_cleanup_retrans_entry(cm_node);
- atomic_inc(&cm_node->refcnt);
- irdma_send_reset(cm_node);
+ irdma_add_ref_cmnode(cm_node);
+ if (irdma_send_reset(cm_node))
+ irdma_rem_ref_cmnode(cm_node);
break;
case IRDMA_CM_STATE_LAST_ACK:
case IRDMA_CM_STATE_CLOSING:
irdma_cleanup_retrans_entry(cm_node);
- cm_node->state = IRDMA_CM_STATE_CLOSED;
- irdma_rem_ref_cm_node(cm_node);
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_CLOSED);
+ irdma_rem_ref_cmnode(cm_node);
break;
case IRDMA_CM_STATE_FIN_WAIT1:
irdma_cleanup_retrans_entry(cm_node);
- cm_node->state = IRDMA_CM_STATE_FIN_WAIT2;
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_FIN_WAIT2);
break;
case IRDMA_CM_STATE_SYN_SENT:
case IRDMA_CM_STATE_FIN_WAIT2:
@@ -2851,8 +3013,8 @@ irdma_create_cm_node(struct irdma_cm_core *cm_core,
/* create a CM connection node */
cm_node = irdma_make_cm_node(cm_core, iwdev, cm_info, NULL);
- if (!cm_node)
- return -ENOMEM;
+ if (IS_ERR(cm_node))
+ return PTR_ERR(cm_node);
/* set our node side to client (active) side */
cm_node->tcp_cntxt.client = 1;
@@ -2889,13 +3051,13 @@ irdma_cm_reject(struct irdma_cm_node *cm_node, const void *pdata,
passive_state = atomic_add_return(1, &cm_node->passive_state);
if (passive_state == IRDMA_SEND_RESET_EVENT) {
- cm_node->state = IRDMA_CM_STATE_CLOSED;
- irdma_rem_ref_cm_node(cm_node);
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_CLOSED);
+ irdma_rem_ref_cmnode(cm_node);
return 0;
}
- if (cm_node->state == IRDMA_CM_STATE_LISTENER_DESTROYED) {
- irdma_rem_ref_cm_node(cm_node);
+ if (irdma_cm_node_cmp_state(cm_node, IRDMA_CM_STATE_LISTENER_DESTROYED)) {
+ irdma_rem_ref_cmnode(cm_node);
return 0;
}
@@ -2903,7 +3065,7 @@ irdma_cm_reject(struct irdma_cm_node *cm_node, const void *pdata,
if (!ret)
return 0;
- cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_CLOSED);
if (irdma_send_reset(cm_node))
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"send reset failed\n");
@@ -2930,7 +3092,7 @@ irdma_cm_close(struct irdma_cm_node *cm_node)
irdma_send_reset(cm_node);
break;
case IRDMA_CM_STATE_CLOSE_WAIT:
- cm_node->state = IRDMA_CM_STATE_LAST_ACK;
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_LAST_ACK);
irdma_send_fin(cm_node);
break;
case IRDMA_CM_STATE_FIN_WAIT1:
@@ -2948,13 +3110,13 @@ irdma_cm_close(struct irdma_cm_node *cm_node)
case IRDMA_CM_STATE_INITED:
case IRDMA_CM_STATE_CLOSED:
case IRDMA_CM_STATE_LISTENER_DESTROYED:
- irdma_rem_ref_cm_node(cm_node);
+ irdma_rem_ref_cmnode(cm_node);
break;
case IRDMA_CM_STATE_OFFLOADED:
if (cm_node->send_entry)
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"CM send_entry in OFFLOADED state\n");
- irdma_rem_ref_cm_node(cm_node);
+ irdma_rem_ref_cmnode(cm_node);
break;
}
@@ -3052,28 +3214,29 @@ irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf)
cm_info.cm_id = listener->cm_id;
cm_node = irdma_make_cm_node(cm_core, iwdev, &cm_info,
listener);
- if (!cm_node) {
+ if (IS_ERR(cm_node)) {
irdma_debug(&cm_core->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "allocate node failed\n");
+ "allocate node failed ret=%ld\n",
+ PTR_ERR(cm_node));
atomic_dec(&listener->refcnt);
return;
}
if (!(tcp_get_flags(tcph) & (TH_RST | TH_FIN))) {
- cm_node->state = IRDMA_CM_STATE_LISTENING;
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_LISTENING);
} else {
- irdma_rem_ref_cm_node(cm_node);
+ irdma_rem_ref_cmnode(cm_node);
return;
}
- atomic_inc(&cm_node->refcnt);
- } else if (cm_node->state == IRDMA_CM_STATE_OFFLOADED) {
- irdma_rem_ref_cm_node(cm_node);
+ irdma_add_ref_cmnode(cm_node);
+ } else if (irdma_cm_node_cmp_state(cm_node, IRDMA_CM_STATE_OFFLOADED)) {
+ irdma_rem_ref_cmnode(cm_node);
return;
}
irdma_process_pkt(cm_node, rbuf);
- irdma_rem_ref_cm_node(cm_node);
+ irdma_rem_ref_cmnode(cm_node);
}
static int
@@ -3248,7 +3411,7 @@ irdma_cm_init_tsa_conn(struct irdma_qp *iwqp,
cm_node->lsmm_size;
}
- cm_node->state = IRDMA_CM_STATE_OFFLOADED;
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_OFFLOADED);
iwqp->tcp_info.tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
iwqp->tcp_info.src_mac_addr_idx = iwqp->iwdev->mac_ip_table_idx;
@@ -3310,6 +3473,68 @@ irdma_qp_disconnect(struct irdma_qp *iwqp)
irdma_cm_close(iwqp->cm_node);
}
+static void
+dump_qp_ae_info(struct irdma_qp *iwqp)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_ae_info *ae_info = &iwdev->ae_info;
+ u16 ae = iwqp->last_aeq;
+
+ if (!ae)
+ return;
+
+ /*
+ * When there is a hard link disconnect reduce prints to avoid slowing down qp cleanup.
+ */
+ if (ae == IRDMA_AE_LLP_TOO_MANY_RETRIES) {
+ unsigned long flags;
+ u32 retry_cnt;
+
+ spin_lock_irqsave(&ae_info->info_lock, flags);
+ ae_info->retry_cnt++;
+ if (time_after(ae_info->retry_delay, jiffies)) {
+ spin_unlock_irqrestore(&ae_info->info_lock, flags);
+ return;
+ }
+
+ retry_cnt = ae_info->retry_cnt;
+ ae_info->retry_cnt = 0;
+ ae_info->retry_delay = jiffies +
+ msecs_to_jiffies(IRDMA_RETRY_PRINT_MS);
+ spin_unlock_irqrestore(&ae_info->info_lock, flags);
+
+ irdma_dev_err(&iwdev->ibdev,
+ "qp async event qp_id = %d, ae = 0x%x (%s), qp_cnt = %d\n",
+ iwqp->sc_qp.qp_uk.qp_id, ae, irdma_get_ae_desc(ae),
+ retry_cnt);
+
+ return;
+ }
+ switch (ae) {
+ case IRDMA_AE_BAD_CLOSE:
+ case IRDMA_AE_LLP_CLOSE_COMPLETE:
+ case IRDMA_AE_LLP_CONNECTION_RESET:
+ case IRDMA_AE_LLP_FIN_RECEIVED:
+ case IRDMA_AE_LLP_SYN_RECEIVED:
+ case IRDMA_AE_LLP_TERMINATE_RECEIVED:
+ case IRDMA_AE_LLP_DOUBT_REACHABILITY:
+ case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
+ case IRDMA_AE_RESET_SENT:
+ case IRDMA_AE_TERMINATE_SENT:
+ case IRDMA_AE_RESET_NOT_SENT:
+ irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_AEQ,
+ "qp async event qp_id = %d, ae = 0x%x (%s), src = %d, ae_cnt = %d\n",
+ iwqp->sc_qp.qp_uk.qp_id, ae, irdma_get_ae_desc(ae),
+ iwqp->ae_src, atomic_read(&ae_info->ae_cnt));
+ break;
+ default:
+ irdma_dev_err(&iwdev->ibdev,
+ "qp async event qp_id = %d, ae = 0x%x (%s), src = %d, ae_cnt = %d\n",
+ iwqp->sc_qp.qp_uk.qp_id, ae, irdma_get_ae_desc(ae),
+ iwqp->ae_src, atomic_read(&ae_info->ae_cnt));
+ }
+}
+
/**
* irdma_cm_disconn_true - called by worker thread to disconnect qp
* @iwqp: associate qp for the connection
@@ -3331,11 +3556,15 @@ irdma_cm_disconn_true(struct irdma_qp *iwqp)
int err;
iwdev = iwqp->iwdev;
+
+ dump_qp_ae_info(iwqp);
spin_lock_irqsave(&iwqp->lock, flags);
+
if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
struct ib_qp_attr attr;
- if (iwqp->flush_issued || iwqp->sc_qp.qp_uk.destroy_pending) {
+ if (atomic_read(&iwqp->flush_issued) ||
+ iwqp->sc_qp.qp_uk.destroy_pending) {
spin_unlock_irqrestore(&iwqp->lock, flags);
return;
}
@@ -3358,10 +3587,8 @@ irdma_cm_disconn_true(struct irdma_qp *iwqp)
issue_close = 1;
iwqp->cm_id = NULL;
irdma_terminate_del_timer(qp);
- if (!iwqp->flush_issued) {
- iwqp->flush_issued = 1;
+ if (!atomic_read(&iwqp->flush_issued))
issue_flush = 1;
- }
} else if ((original_hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT) ||
((original_ibqp_state == IB_QPS_RTS) &&
(last_ae == IRDMA_AE_LLP_CONNECTION_RESET))) {
@@ -3378,10 +3605,8 @@ irdma_cm_disconn_true(struct irdma_qp *iwqp)
issue_close = 1;
iwqp->cm_id = NULL;
qp->term_flags = 0;
- if (!iwqp->flush_issued) {
- iwqp->flush_issued = 1;
+ if (!atomic_read(&iwqp->flush_issued))
issue_flush = 1;
- }
}
spin_unlock_irqrestore(&iwqp->lock, flags);
@@ -3401,7 +3626,7 @@ irdma_cm_disconn_true(struct irdma_qp *iwqp)
spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
return;
}
- atomic_inc(&iwqp->cm_node->refcnt);
+ irdma_add_ref_cmnode(iwqp->cm_node);
spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
@@ -3424,7 +3649,7 @@ irdma_cm_disconn_true(struct irdma_qp *iwqp)
cm_id);
irdma_qp_disconnect(iwqp);
}
- irdma_rem_ref_cm_node(iwqp->cm_node);
+ irdma_rem_ref_cmnode(iwqp->cm_node);
}
/**
@@ -3544,7 +3769,7 @@ irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
iwpd = iwqp->iwpd;
tagged_offset = (uintptr_t)iwqp->ietf_mem.va;
ibmr = irdma_reg_phys_mr(&iwpd->ibpd, iwqp->ietf_mem.pa, buf_len,
- IB_ACCESS_LOCAL_WRITE, &tagged_offset);
+ IB_ACCESS_LOCAL_WRITE, &tagged_offset, false);
if (IS_ERR(ibmr)) {
ret = -ENOMEM;
goto error;
@@ -3611,7 +3836,7 @@ irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
return 0;
error:
irdma_free_lsmm_rsrc(iwqp);
- irdma_rem_ref_cm_node(cm_node);
+ irdma_rem_ref_cmnode(cm_node);
return ret;
}
@@ -3761,8 +3986,8 @@ irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
irdma_qp_add_ref(&iwqp->ibqp);
cm_id->add_ref(cm_id);
- if (cm_node->state != IRDMA_CM_STATE_OFFLOADED) {
- cm_node->state = IRDMA_CM_STATE_SYN_SENT;
+ if (!irdma_cm_node_cmp_state(cm_node, IRDMA_CM_STATE_OFFLOADED)) {
+ irdma_cm_node_set_state(cm_node, IRDMA_CM_STATE_SYN_SENT);
ret = irdma_send_syn(cm_node, 0);
if (ret)
goto err;
@@ -3784,7 +4009,7 @@ err:
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"connect() FAILED: dest addr=%x:%x:%x:%x",
IRDMA_PRINT_IP6(cm_info.rem_addr));
- irdma_rem_ref_cm_node(cm_node);
+ irdma_rem_ref_cmnode(cm_node);
iwdev->cm_core.stats_connect_errs++;
return ret;
@@ -3955,7 +4180,7 @@ irdma_iw_teardown_list_prep(struct irdma_cm_core *cm_core,
if ((disconnect_all ||
(nfo->vlan_id == cm_node->vlan_id &&
!memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 4 : 16))) &&
- atomic_inc_not_zero(&cm_node->refcnt))
+ irdma_add_ref_cmnode(cm_node))
list_add(&cm_node->teardown_entry, teardown_list);
}
}
@@ -4089,7 +4314,7 @@ error:
cm_id->provider_data = NULL;
irdma_send_cm_event(event->cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
status);
- irdma_rem_ref_cm_node(event->cm_node);
+ irdma_rem_ref_cmnode(event->cm_node);
}
/**
@@ -4144,20 +4369,20 @@ irdma_cm_event_handler(struct work_struct *work)
break;
case IRDMA_CM_EVENT_CONNECTED:
if (!event->cm_node->cm_id ||
- event->cm_node->state != IRDMA_CM_STATE_OFFLOADED)
+ !irdma_cm_node_cmp_state(cm_node, IRDMA_CM_STATE_OFFLOADED))
break;
irdma_cm_event_connected(event);
break;
case IRDMA_CM_EVENT_MPA_REJECT:
if (!event->cm_node->cm_id ||
- cm_node->state == IRDMA_CM_STATE_OFFLOADED)
+ irdma_cm_node_cmp_state(cm_node, IRDMA_CM_STATE_OFFLOADED))
break;
irdma_send_cm_event(cm_node, cm_node->cm_id,
IW_CM_EVENT_CONNECT_REPLY, -ECONNREFUSED);
break;
case IRDMA_CM_EVENT_ABORTED:
if (!event->cm_node->cm_id ||
- event->cm_node->state == IRDMA_CM_STATE_OFFLOADED)
+ irdma_cm_node_cmp_state(cm_node, IRDMA_CM_STATE_OFFLOADED))
break;
irdma_event_connect_error(event);
break;
@@ -4167,7 +4392,7 @@ irdma_cm_event_handler(struct work_struct *work)
break;
}
- irdma_rem_ref_cm_node(event->cm_node);
+ irdma_rem_ref_cmnode(cm_node);
kfree(event);
}
@@ -4178,7 +4403,7 @@ irdma_cm_event_handler(struct work_struct *work)
static void
irdma_cm_post_event(struct irdma_cm_event *event)
{
- atomic_inc(&event->cm_node->refcnt);
+ irdma_add_ref_cmnode(event->cm_node);
INIT_WORK(&event->event_work, irdma_cm_event_handler);
queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
}
@@ -4219,7 +4444,7 @@ irdma_cm_teardown_connections(struct irdma_device *iwdev,
irdma_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
if (iwdev->rf->reset)
irdma_cm_disconn(cm_node->iwqp);
- irdma_rem_ref_cm_node(cm_node);
+ irdma_rem_ref_cmnode(cm_node);
}
if (!rdma_protocol_roce(&iwdev->ibdev, 1))
diff --git a/sys/dev/irdma/irdma_cm.h b/sys/dev/irdma/irdma_cm.h
index 36cebdb5bf19..26fbee4499c1 100644
--- a/sys/dev/irdma/irdma_cm.h
+++ b/sys/dev/irdma/irdma_cm.h
@@ -443,6 +443,7 @@ int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr,
bool irdma_port_in_use(struct irdma_cm_core *cm_core, u16 port);
void irdma_send_ack(struct irdma_cm_node *cm_node);
void irdma_lpb_nop(struct irdma_sc_qp *qp);
-void irdma_rem_ref_cm_node(struct irdma_cm_node *cm_node);
+bool irdma_add_ref_cmnode(struct irdma_cm_node *cm_node);
+void irdma_rem_ref_cmnode(struct irdma_cm_node *cm_node);
void irdma_add_conn_est_qh(struct irdma_cm_node *cm_node);
#endif /* IRDMA_CM_H */
diff --git a/sys/dev/irdma/irdma_ctrl.c b/sys/dev/irdma/irdma_ctrl.c
index 79ed14a60670..c3bddab7f477 100644
--- a/sys/dev/irdma/irdma_ctrl.c
+++ b/sys/dev/irdma/irdma_ctrl.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2023 Intel Corporation
+ * Copyright (c) 2015 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -81,6 +81,24 @@ irdma_get_qp_from_list(struct list_head *head,
}
/**
+ * irdma_get_qp_qs - return qs_handle for the qp
+ * @qp: qp for qset
+ *
+ * Returns the queue set that should be used for a given qp. The qos
+ * mutex should be acquired before calling.
+ */
+static u16 irdma_get_qp_qs(struct irdma_sc_qp *qp){
+
+ struct irdma_sc_vsi *vsi = qp->vsi;
+ u16 qs_handle;
+
+ qs_handle =
+ vsi->qos[qp->user_pri].qs_handle;
+
+ return qs_handle;
+}
+
+/**
* irdma_sc_suspend_resume_qps - suspend/resume all qp's on VSI
* @vsi: the VSI struct pointer
* @op: Set to IRDMA_OP_RESUME or IRDMA_OP_SUSPEND
@@ -96,18 +114,28 @@ irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 op)
qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
while (qp) {
if (op == IRDMA_OP_RESUME) {
+ if (!qp->suspended) {
+ qp = irdma_get_qp_from_list(&vsi->qos[i].qplist,
+ qp);
+ continue;
+ }
if (!qp->dev->ws_add(vsi, i)) {
- qp->qs_handle =
- vsi->qos[qp->user_pri].qs_handle;
- irdma_cqp_qp_suspend_resume(qp, op);
+ qp->qs_handle = irdma_get_qp_qs(qp);
+ if (!irdma_cqp_qp_suspend_resume(qp, op))
+ qp->suspended = false;
} else {
- irdma_cqp_qp_suspend_resume(qp, op);
+ if (!irdma_cqp_qp_suspend_resume(qp, op))
+ qp->suspended = false;
irdma_modify_qp_to_err(qp);
}
} else if (op == IRDMA_OP_SUSPEND) {
/* issue cqp suspend command */
- if (!irdma_cqp_qp_suspend_resume(qp, op))
+ if ((qp->qp_state == IRDMA_QP_STATE_RTS ||
+ qp->qp_state == IRDMA_QP_STATE_RTR) &&
+ !irdma_cqp_qp_suspend_resume(qp, op)) {
atomic_inc(&vsi->qp_suspend_reqs);
+ qp->suspended = true;
+ }
}
qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
}
@@ -135,7 +163,7 @@ irdma_set_qos_info(struct irdma_sc_vsi *vsi, struct irdma_l2params *l2p)
vsi->qos[i].qs_handle = l2p->qs_handle_list[i];
if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2)
irdma_init_config_check(&vsi->cfg_check[i],
- l2p->up2tc[i],
+ l2p->up2tc[i], i,
l2p->qs_handle_list[i]);
vsi->qos[i].traffic_class = l2p->up2tc[i];
vsi->qos[i].rel_bw =
@@ -197,15 +225,16 @@ irdma_qp_add_qos(struct irdma_sc_qp *qp)
{
struct irdma_sc_vsi *vsi = qp->vsi;
- irdma_debug(qp->dev, IRDMA_DEBUG_DCB,
- "DCB: Add qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
- qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
- qp->on_qoslist);
mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
if (!qp->on_qoslist) {
list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
qp->on_qoslist = true;
- qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
+ qp->qs_handle = irdma_get_qp_qs(qp);
+ irdma_debug(qp->dev, IRDMA_DEBUG_DCB,
+ "DCB: Add qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
+ qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
+ qp->on_qoslist);
+
}
mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
}
@@ -362,6 +391,7 @@ irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
u64 qw1 = 0;
u64 qw2 = 0;
u64 temp;
+ u16 qs_handle;
struct irdma_sc_vsi *vsi = info->vsi;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
@@ -383,8 +413,10 @@ irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->dest_ip[2]) |
FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[3]));
}
- qw2 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QS_HANDLE,
- vsi->qos[info->user_pri].qs_handle);
+
+ qs_handle = vsi->qos[info->user_pri].qs_handle;
+
+ qw2 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QS_HANDLE, qs_handle);
if (info->vlan_valid)
qw2 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANID, info->vlan_id);
set_64bit_val(wqe, IRDMA_BYTE_16, qw2);
@@ -493,7 +525,6 @@ irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
qp->rcv_tph_en = info->rcv_tph_en;
qp->xmit_tph_en = info->xmit_tph_en;
qp->qp_uk.first_sq_wq = info->qp_uk_init_info.first_sq_wq;
- qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
return 0;
}
@@ -1167,9 +1198,9 @@ irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, info->remote_access) |
- FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
- FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ /* for FNIC, a PF can send this WQE for a VF */
+ hdr |= FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -1246,7 +1277,8 @@ irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
set_64bit_val(wqe, IRDMA_BYTE_48,
FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_index));
- set_64bit_val(wqe, IRDMA_BYTE_40, info->hmc_fcn_index);
+ hdr = info->hmc_fcn_index;
+ set_64bit_val(wqe, IRDMA_BYTE_40, hdr);
addr_type = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ? 1 : 0;
hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_REG_MR) |
@@ -1257,7 +1289,6 @@ irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, remote_access) |
FIELD_PREP(IRDMA_CQPSQ_STAG_VABASEDTO, addr_type) |
FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
- FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -1299,6 +1330,7 @@ irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DEALLOC_STAG) |
FIELD_PREP(IRDMA_CQPSQ_STAG_MR, info->mr) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_SKIPFLUSH, info->skip_flush_markers) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -1424,7 +1456,7 @@ irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "FAST_REG WQE", wqe,
- IRDMA_QP_WQE_MIN_SIZE);
+ quanta * IRDMA_QP_WQE_MIN_SIZE);
if (sq_info.push_wqe)
irdma_qp_push_wqe(&qp->qp_uk, wqe, quanta, wqe_idx, post_sq);
else if (post_sq)
@@ -1970,7 +2002,7 @@ irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
mutex_init(&vsi->qos[i].qos_mutex);
INIT_LIST_HEAD(&vsi->qos[i].qplist);
}
- if (vsi->register_qset) {
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2) {
vsi->dev->ws_add = irdma_ws_add;
vsi->dev->ws_remove = irdma_ws_remove;
vsi->dev->ws_reset = irdma_ws_reset;
@@ -1982,23 +2014,6 @@ irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
}
/**
- * irdma_get_stats_idx - Return stats index
- * @vsi: pointer to the vsi
- */
-static u16 irdma_get_stats_idx(struct irdma_sc_vsi *vsi){
- struct irdma_stats_inst_info stats_info = {0};
- struct irdma_sc_dev *dev = vsi->dev;
-
- if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
- if (!irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_ALLOCATE,
- &stats_info))
- return stats_info.stats_idx;
- }
-
- return IRDMA_INVALID_STATS_IDX;
-}
-
-/**
* irdma_vsi_stats_init - Initialize the vsi statistics
* @vsi: pointer to the vsi structure
* @info: The info structure used for initialization
@@ -2030,16 +2045,6 @@ irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
/* when stat allocation is not required default to fcn_id. */
vsi->stats_idx = info->fcn_id;
- if (info->alloc_stats_inst) {
- u16 stats_idx = irdma_get_stats_idx(vsi);
-
- if (stats_idx != IRDMA_INVALID_STATS_IDX) {
- vsi->stats_inst_alloc = true;
- vsi->stats_idx = stats_idx;
- vsi->pestat->gather_info.use_stats_inst = true;
- vsi->pestat->gather_info.stats_inst_index = stats_idx;
- }
- }
return 0;
}
@@ -2051,16 +2056,6 @@ irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
void
irdma_vsi_stats_free(struct irdma_sc_vsi *vsi)
{
- struct irdma_stats_inst_info stats_info = {0};
- struct irdma_sc_dev *dev = vsi->dev;
-
- if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
- if (vsi->stats_inst_alloc) {
- stats_info.stats_idx = vsi->stats_idx;
- irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_FREE,
- &stats_info);
- }
- }
if (!vsi->pestat)
return;
@@ -2136,45 +2131,6 @@ irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
}
/**
- * irdma_sc_manage_stats_inst - allocate or free stats instance
- * @cqp: struct for cqp hw
- * @info: stats info structure
- * @alloc: alloc vs. delete flag
- * @scratch: u64 saved to be used during cqp completion
- */
-static int
-irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
- struct irdma_stats_inst_info *info,
- bool alloc, u64 scratch)
-{
- __le64 *wqe;
- u64 temp;
-
- wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return -ENOSPC;
-
- set_64bit_val(wqe, IRDMA_BYTE_40,
- FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fn_id));
- temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
- FIELD_PREP(IRDMA_CQPSQ_STATS_ALLOC_INST, alloc) |
- FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX,
- info->use_hmc_fcn_index) |
- FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX, info->stats_idx) |
- FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_MANAGE_STATS);
-
- irdma_wmb(); /* make sure WQE is written before valid bit is set */
-
- set_64bit_val(wqe, IRDMA_BYTE_24, temp);
-
- irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_STATS WQE", wqe,
- IRDMA_CQP_WQE_SIZE * 8);
-
- irdma_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
* irdma_sc_set_up_map - set the up map table
* @cqp: struct for cqp hw
* @info: User priority map info
@@ -2940,7 +2896,7 @@ irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 * buf,
IRDMA_HMC_IW_HDR);
irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_152, info,
IRDMA_HMC_IW_MD);
- if (dev->cqp->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) {
+ if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2) {
irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_160, info,
IRDMA_HMC_IW_OOISC);
irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_168, info,
@@ -2950,7 +2906,7 @@ irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 * buf,
/* searching for the last object in HMC to find the size of the HMC area. */
for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) {
- if (info[i].base > max_base) {
+ if (info[i].base > max_base && info[i].cnt) {
max_base = info[i].base;
last_hmc_obj = i;
}
@@ -3079,7 +3035,7 @@ irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 * buf,
irdma_sc_decode_fpm_query(buf, 144, obj_info, IRDMA_HMC_IW_HDR);
irdma_sc_decode_fpm_query(buf, 152, obj_info, IRDMA_HMC_IW_MD);
- if (dev->cqp->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) {
+ if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2) {
irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC);
get_64bit_val(buf, IRDMA_BYTE_168, &temp);
@@ -3173,8 +3129,8 @@ irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
{
u8 hw_sq_size;
- if (info->sq_size > IRDMA_CQP_SW_SQSIZE_2048 ||
- info->sq_size < IRDMA_CQP_SW_SQSIZE_4 ||
+ if (info->sq_size > IRDMA_CQP_SW_SQSIZE_MAX ||
+ info->sq_size < IRDMA_CQP_SW_SQSIZE_MIN ||
((info->sq_size & (info->sq_size - 1))))
return -EINVAL;
@@ -3202,6 +3158,7 @@ irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
cqp->protocol_used = info->protocol_used;
irdma_memcpy(&cqp->dcqcn_params, &info->dcqcn_params, sizeof(cqp->dcqcn_params));
cqp->en_rem_endpoint_trk = info->en_rem_endpoint_trk;
+ cqp->timer_slots = info->timer_slots;
info->dev->cqp = cqp;
IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size);
@@ -3262,6 +3219,8 @@ irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err)
temp = FIELD_PREP(IRDMA_CQPHC_ENABLED_VFS, cqp->ena_vf_count) |
FIELD_PREP(IRDMA_CQPHC_HMC_PROFILE, cqp->hmc_profile);
+ if (hw_rev == IRDMA_GEN_2)
+ temp |= FIELD_PREP(IRDMA_CQPHC_TMR_SLOT, cqp->timer_slots);
if (hw_rev >= IRDMA_GEN_2)
temp |= FIELD_PREP(IRDMA_CQPHC_EN_REM_ENDPOINT_TRK,
cqp->en_rem_endpoint_trk);
@@ -3836,10 +3795,9 @@ irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
/**
* irdma_sc_cceq_create - create cceq
* @ceq: ceq sc structure
- * @scratch: u64 saved to be used during cqp completion
*/
int
-irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
+irdma_sc_cceq_create(struct irdma_sc_ceq *ceq)
{
int ret_code;
struct irdma_sc_dev *dev = ceq->dev;
@@ -3850,7 +3808,7 @@ irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
if (ret_code)
return ret_code;
}
- ret_code = irdma_sc_ceq_create(ceq, scratch, true);
+ ret_code = irdma_sc_ceq_create(ceq, 0, true);
if (!ret_code)
return irdma_sc_cceq_create_done(ceq);
@@ -4081,7 +4039,9 @@ irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch, bool post_sq)
u64 hdr;
dev = aeq->dev;
- writel(0, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
+
+ if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
+ writel(0, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
@@ -4180,6 +4140,7 @@ irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
case IRDMA_AE_LCE_QP_CATASTROPHIC:
case IRDMA_AE_LLP_DOUBT_REACHABILITY:
case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
+ case IRDMA_AE_LLP_TOO_MANY_RNRS:
case IRDMA_AE_RESET_SENT:
case IRDMA_AE_TERMINATE_SENT:
case IRDMA_AE_RESET_NOT_SENT:
@@ -4388,8 +4349,10 @@ irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq)
u64 hdr;
int ret_code = 0;
u32 tail, val, error;
+ struct irdma_sc_dev *dev;
cqp = ccq->dev->cqp;
+ dev = ccq->dev;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
return -ENOSPC;
@@ -4418,10 +4381,11 @@ irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq)
if (post_sq) {
irdma_sc_cqp_post_sq(cqp);
ret_code = irdma_cqp_poll_registers(cqp, tail,
- cqp->dev->hw_attrs.max_done_count);
+ dev->hw_attrs.max_done_count);
}
cqp->process_cqp_sds = irdma_update_sds_noccq;
+ dev->ccq = NULL;
return ret_code;
}
@@ -4792,8 +4756,11 @@ irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
int
irdma_get_rdma_features(struct irdma_sc_dev *dev)
{
- int ret_code, byte_idx, feat_type, feat_cnt, feat_idx;
struct irdma_dma_mem feat_buf;
+ u16 feat_cnt;
+ u16 feat_idx;
+ u8 feat_type;
+ int ret_code;
u64 temp;
feat_buf.size = IRDMA_FEATURE_BUF_SIZE;
@@ -4834,13 +4801,18 @@ irdma_get_rdma_features(struct irdma_sc_dev *dev)
}
}
- irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "QUERY RDMA FEATURES", feat_buf.va,
- feat_cnt * 8);
+ irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "QUERY RDMA FEATURES",
+ feat_buf.va, feat_cnt * 8);
- for (byte_idx = 0, feat_idx = 0; feat_idx < min(feat_cnt, IRDMA_MAX_FEATURES);
- feat_idx++, byte_idx += 8) {
- get_64bit_val(feat_buf.va, byte_idx, &temp);
+ for (feat_idx = 0; feat_idx < feat_cnt; feat_idx++) {
+ get_64bit_val(feat_buf.va, feat_idx * 8, &temp);
feat_type = FIELD_GET(IRDMA_FEATURE_TYPE, temp);
+
+ if (feat_type >= IRDMA_MAX_FEATURES) {
+ irdma_debug(dev, IRDMA_DEBUG_DEV,
+ "unknown feature type %u\n", feat_type);
+ continue;
+ }
dev->feature_info[feat_type] = temp;
}
exit:
@@ -4899,6 +4871,28 @@ cfg_fpm_value_gen_2(struct irdma_sc_dev *dev,
}
/**
+ * irdma_cfg_sd_mem - allocate sd memory
+ * @dev: sc device struct
+ * @hmc_info: ptr to irdma_hmc_obj_info struct
+ */
+static int
+irdma_cfg_sd_mem(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info)
+{
+ struct irdma_virt_mem virt_mem;
+ u32 mem_size;
+
+ mem_size = sizeof(struct irdma_hmc_sd_entry) * hmc_info->sd_table.sd_cnt;
+ virt_mem.size = mem_size;
+ virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL);
+ if (!virt_mem.va)
+ return -ENOMEM;
+ hmc_info->sd_table.sd_entry = virt_mem.va;
+
+ return 0;
+}
+
+/**
* irdma_cfg_fpm_val - configure HMC objects
* @dev: sc device struct
* @qp_count: desired qp count
@@ -4906,10 +4900,8 @@ cfg_fpm_value_gen_2(struct irdma_sc_dev *dev,
int
irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
{
- struct irdma_virt_mem virt_mem;
- u32 i, mem_size;
u32 qpwanted, mrwanted, pblewanted;
- u32 hte;
+ u32 hte, i;
u32 sd_needed;
u32 sd_diff;
u32 loop_count = 0;
@@ -4934,7 +4926,7 @@ irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
sd_needed = irdma_est_sd(dev, hmc_info);
- irdma_debug(dev, IRDMA_DEBUG_HMC, "sd count %d where max sd is %d\n",
+ irdma_debug(dev, IRDMA_DEBUG_HMC, "sd count %u where max sd is %u\n",
hmc_info->sd_table.sd_cnt, max_sds);
qpwanted = min(qp_count, hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt);
@@ -4945,7 +4937,7 @@ irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
pblewanted = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt;
irdma_debug(dev, IRDMA_DEBUG_HMC,
- "req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d, mc=%d, av=%d\n",
+ "req_qp=%d max_sd=%u, max_qp = %u, max_cq=%u, max_mr=%u, max_pble=%u, mc=%d, av=%u\n",
qp_count, max_sds,
hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt,
hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt,
@@ -4959,8 +4951,7 @@ irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt;
hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt =
hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].max_cnt;
- if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
- hmc_info->hmc_obj[IRDMA_HMC_IW_APBVT_ENTRY].cnt = 1;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_APBVT_ENTRY].cnt = 1;
while (irdma_q1_cnt(dev, hmc_info, qpwanted) > hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].max_cnt)
qpwanted /= 2;
@@ -5013,11 +5004,12 @@ irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
if (!(loop_count % 2) && qpwanted > 128) {
qpwanted /= 2;
} else {
- mrwanted /= 2;
pblewanted /= 2;
+ mrwanted /= 2;
}
continue;
}
+
if (dev->cqp->hmc_profile != IRDMA_HMC_PROFILE_FAVOR_VF &&
pblewanted > (512 * FPM_MULTIPLIER * sd_diff)) {
pblewanted -= 256 * FPM_MULTIPLIER * sd_diff;
@@ -5043,7 +5035,7 @@ irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
if (sd_needed > max_sds) {
irdma_debug(dev, IRDMA_DEBUG_HMC,
- "cfg_fpm failed loop_cnt=%d, sd_needed=%d, max sd count %d\n",
+ "cfg_fpm failed loop_cnt=%u, sd_needed=%u, max sd count %u\n",
loop_count, sd_needed, hmc_info->sd_table.sd_cnt);
return -EINVAL;
}
@@ -5073,18 +5065,7 @@ irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
return ret_code;
}
- mem_size = sizeof(struct irdma_hmc_sd_entry) *
- (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
- virt_mem.size = mem_size;
- virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL);
- if (!virt_mem.va) {
- irdma_debug(dev, IRDMA_DEBUG_HMC,
- "failed to allocate memory for sd_entry buffer\n");
- return -ENOMEM;
- }
- hmc_info->sd_table.sd_entry = virt_mem.va;
-
- return ret_code;
+ return irdma_cfg_sd_mem(dev, hmc_info);
}
/**
@@ -5098,7 +5079,6 @@ irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
{
int status;
struct irdma_dma_mem val_mem;
- bool alloc = false;
dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
switch (pcmdinfo->cqp_cmd) {
@@ -5204,15 +5184,6 @@ irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
true,
IRDMA_CQP_WAIT_EVENT);
break;
- case IRDMA_OP_STATS_ALLOCATE:
- alloc = true;
- /* fallthrough */
- case IRDMA_OP_STATS_FREE:
- status = irdma_sc_manage_stats_inst(pcmdinfo->in.u.stats_manage.cqp,
- &pcmdinfo->in.u.stats_manage.info,
- alloc,
- pcmdinfo->in.u.stats_manage.scratch);
- break;
case IRDMA_OP_STATS_GATHER:
status = irdma_sc_gather_stats(pcmdinfo->in.u.stats_gather.cqp,
&pcmdinfo->in.u.stats_gather.info,
@@ -5381,6 +5352,7 @@ irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
status = irdma_exec_cqp_cmd(dev, pcmdinfo);
else
list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
+ pcmdinfo->cqp_cmd_exec_status = status;
spin_unlock_irqrestore(&dev->cqp_lock, flags);
return status;
}
@@ -5389,7 +5361,7 @@ irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
* irdma_process_bh - called from tasklet for cqp list
* @dev: sc device struct
*/
-int
+void
irdma_process_bh(struct irdma_sc_dev *dev)
{
int status = 0;
@@ -5402,10 +5374,9 @@ irdma_process_bh(struct irdma_sc_dev *dev)
pcmdinfo = (struct cqp_cmds_info *)irdma_remove_cqp_head(dev);
status = irdma_exec_cqp_cmd(dev, pcmdinfo);
if (status)
- break;
+ pcmdinfo->cqp_cmd_exec_status = status;
}
spin_unlock_irqrestore(&dev->cqp_lock, flags);
- return status;
}
/**
@@ -5464,7 +5435,7 @@ irdma_wait_pe_ready(struct irdma_sc_dev *dev)
if (statuscpu0 == 0x80 && statuscpu1 == 0x80 &&
statuscpu2 == 0x80)
return 0;
- mdelay(1000);
+ mdelay(100);
} while (retrycount++ < dev->hw_attrs.max_pe_ready_count);
return -1;
}
@@ -5566,9 +5537,9 @@ static inline u64 irdma_stat_val(const u64 *stats_val, u16 byteoff,
static inline u64 irdma_stat_delta(u64 new_val, u64 old_val, u64 max_val) {
if (new_val >= old_val)
return new_val - old_val;
- else
- /* roll-over case */
- return max_val - old_val + new_val + 1;
+
+ /* roll-over case */
+ return max_val - old_val + new_val + 1;
}
/**
diff --git a/sys/dev/irdma/irdma_defs.h b/sys/dev/irdma/irdma_defs.h
index fd3bf82c9ad6..a4bed8d5f93d 100644
--- a/sys/dev/irdma/irdma_defs.h
+++ b/sys/dev/irdma/irdma_defs.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2023 Intel Corporation
+ * Copyright (c) 2015 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -66,7 +66,6 @@
#define IRDMA_DSCP_NUM_VAL 64
#define IRDMA_MAX_TRAFFIC_CLASS 8
#define IRDMA_MAX_STATS_COUNT 128
-#define IRDMA_FIRST_NON_PF_STAT 4
#define IRDMA_MIN_MTU_IPV4 576
#define IRDMA_MIN_MTU_IPV6 1280
@@ -115,11 +114,31 @@
#define IRDMA_BYTE_200 200
#define IRDMA_BYTE_208 208
#define IRDMA_BYTE_216 216
+#define IRDMA_BYTE_224 224
+#define IRDMA_BYTE_232 232
+#define IRDMA_BYTE_240 240
+#define IRDMA_BYTE_248 248
+#define IRDMA_BYTE_256 256
+#define IRDMA_BYTE_264 264
+#define IRDMA_BYTE_272 272
+#define IRDMA_BYTE_280 280
+#define IRDMA_BYTE_288 288
+#define IRDMA_BYTE_296 296
+#define IRDMA_BYTE_304 304
+#define IRDMA_BYTE_312 312
+#define IRDMA_BYTE_320 320
+#define IRDMA_BYTE_328 328
+#define IRDMA_BYTE_336 336
+#define IRDMA_BYTE_344 344
+#define IRDMA_BYTE_352 352
+#define IRDMA_BYTE_360 360
+#define IRDMA_BYTE_368 368
+#define IRDMA_BYTE_376 376
+#define IRDMA_BYTE_384 384
#define IRDMA_CQP_WAIT_POLL_REGS 1
#define IRDMA_CQP_WAIT_POLL_CQ 2
#define IRDMA_CQP_WAIT_EVENT 3
-
#define IRDMA_AE_SOURCE_RSVD 0x0
#define IRDMA_AE_SOURCE_RQ 0x1
#define IRDMA_AE_SOURCE_RQ_0011 0x3
@@ -157,8 +176,8 @@
#define IRDMA_TCP_STATE_RESERVED_3 14
#define IRDMA_TCP_STATE_RESERVED_4 15
-#define IRDMA_CQP_SW_SQSIZE_4 4
-#define IRDMA_CQP_SW_SQSIZE_2048 2048
+#define IRDMA_CQP_SW_SQSIZE_MIN 4
+#define IRDMA_CQP_SW_SQSIZE_MAX 2048
#define IRDMA_CQ_TYPE_IWARP 1
#define IRDMA_CQ_TYPE_ILQ 2
@@ -202,6 +221,8 @@
#define IRDMA_MAX_RQ_WQE_SHIFT_GEN1 2
#define IRDMA_MAX_RQ_WQE_SHIFT_GEN2 3
+#define IRDMA_DEFAULT_MAX_PUSH_LEN 8192
+
#define IRDMA_SQ_RSVD 258
#define IRDMA_RQ_RSVD 1
@@ -222,6 +243,7 @@
#define IRDMAQP_OP_RDMA_READ_LOC_INV 0x0b
#define IRDMAQP_OP_NOP 0x0c
#define IRDMAQP_OP_RDMA_WRITE_SOL 0x0d
+
#define IRDMAQP_OP_GEN_RTS_AE 0x30
enum irdma_cqp_op_type {
@@ -272,9 +294,9 @@ enum irdma_cqp_op_type {
IRDMA_OP_ADD_LOCAL_MAC_ENTRY = 46,
IRDMA_OP_DELETE_LOCAL_MAC_ENTRY = 47,
IRDMA_OP_CQ_MODIFY = 48,
-
+ IRDMA_OP_WS_MOVE = 49,
/* Must be last entry */
- IRDMA_MAX_CQP_OPS = 49,
+ IRDMA_MAX_CQP_OPS = 50,
};
/* CQP SQ WQES */
@@ -322,6 +344,7 @@ enum irdma_cqp_op_type {
#define IRDMA_CQP_OP_MANAGE_STATS 0x2d
#define IRDMA_CQP_OP_GATHER_STATS 0x2e
#define IRDMA_CQP_OP_UP_MAP 0x2f
+#define IRDMA_CQP_OP_MOVE_WS_NODES 0x34
#ifndef LS_64_1
#define LS_64_1(val, bits) ((u64)(uintptr_t)(val) << (bits))
@@ -420,6 +443,7 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_WS_VMVFNUM GENMASK_ULL(51, 42)
#define IRDMA_CQPSQ_WS_OP_S 32
#define IRDMA_CQPSQ_WS_OP GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_WS_MOVE_OP GENMASK_ULL(37, 32)
#define IRDMA_CQPSQ_WS_PARENTID_S 16
#define IRDMA_CQPSQ_WS_PARENTID GENMASK_ULL(25, 16)
#define IRDMA_CQPSQ_WS_NODEID_S 0
@@ -493,6 +517,8 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPHC_EN_REM_ENDPOINT_TRK_S 3
#define IRDMA_CQPHC_EN_REM_ENDPOINT_TRK BIT_ULL(3)
+#define IRDMA_CQPHC_TMR_SLOT_S 16
+#define IRDMA_CQPHC_TMR_SLOT GENMASK_ULL(19, 16)
#define IRDMA_CQPHC_ENABLED_VFS_S 32
#define IRDMA_CQPHC_ENABLED_VFS GENMASK_ULL(37, 32)
@@ -672,10 +698,10 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_QP_QPCTX IRDMA_CQPHC_QPCTX
#define IRDMA_CQPSQ_QP_QPID_S 0
-#define IRDMA_CQPSQ_QP_QPID_M (0xFFFFFFUL)
+#define IRDMA_CQPSQ_QP_QPID GENMASK_ULL(23, 0)
#define IRDMA_CQPSQ_QP_OP_S 32
-#define IRDMA_CQPSQ_QP_OP_M IRDMACQ_OP_M
+#define IRDMA_CQPSQ_QP_OP GENMASK_ULL(37, 32)
#define IRDMA_CQPSQ_QP_ORDVALID_S 42
#define IRDMA_CQPSQ_QP_ORDVALID BIT_ULL(42)
#define IRDMA_CQPSQ_QP_TOECTXVALID_S 43
@@ -751,6 +777,8 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_STAG_MR BIT_ULL(43)
#define IRDMA_CQPSQ_STAG_MWTYPE_S 42
#define IRDMA_CQPSQ_STAG_MWTYPE BIT_ULL(42)
+#define IRDMA_CQPSQ_STAG_SKIPFLUSH_S 40
+#define IRDMA_CQPSQ_STAG_SKIPFLUSH BIT_ULL(40)
#define IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY_S 58
#define IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY BIT_ULL(58)
@@ -767,8 +795,6 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_STAG_VABASEDTO BIT_ULL(59)
#define IRDMA_CQPSQ_STAG_USEHMCFNIDX_S 60
#define IRDMA_CQPSQ_STAG_USEHMCFNIDX BIT_ULL(60)
-#define IRDMA_CQPSQ_STAG_USEPFRID_S 61
-#define IRDMA_CQPSQ_STAG_USEPFRID BIT_ULL(61)
#define IRDMA_CQPSQ_STAG_PBA_S IRDMA_CQPHC_QPCTX_S
#define IRDMA_CQPSQ_STAG_PBA IRDMA_CQPHC_QPCTX
@@ -882,12 +908,15 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_AEQ_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
#define IRDMA_COMMIT_FPM_QPCNT_S 0
-#define IRDMA_COMMIT_FPM_QPCNT GENMASK_ULL(18, 0)
+#define IRDMA_COMMIT_FPM_QPCNT GENMASK_ULL(20, 0)
#define IRDMA_COMMIT_FPM_BASE_S 32
#define IRDMA_CQPSQ_CFPM_HMCFNID_S 0
#define IRDMA_CQPSQ_CFPM_HMCFNID GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_CFPM_HW_FLUSH_TIMER_DISABLE_S 43
+#define IRDMA_CQPSQ_CFPM_HW_FLUSH_TIMER_DISABLE BIT_ULL(43)
+
#define IRDMA_CQPSQ_FWQE_AECODE_S 0
#define IRDMA_CQPSQ_FWQE_AECODE GENMASK_ULL(15, 0)
#define IRDMA_CQPSQ_FWQE_AESOURCE_S 16
@@ -1123,9 +1152,9 @@ enum irdma_cqp_op_type {
#define IRDMAQPC_RNRNAK_THRESH_S 54
#define IRDMAQPC_RNRNAK_THRESH GENMASK_ULL(56, 54)
#define IRDMAQPC_TXCQNUM_S 0
-#define IRDMAQPC_TXCQNUM GENMASK_ULL(18, 0)
+#define IRDMAQPC_TXCQNUM GENMASK_ULL(24, 0)
#define IRDMAQPC_RXCQNUM_S 32
-#define IRDMAQPC_RXCQNUM GENMASK_ULL(50, 32)
+#define IRDMAQPC_RXCQNUM GENMASK_ULL(56, 32)
#define IRDMAQPC_STAT_INDEX_S 0
#define IRDMAQPC_STAT_INDEX GENMASK_ULL(6, 0)
#define IRDMAQPC_Q2ADDR_S 8
@@ -1213,6 +1242,8 @@ enum irdma_cqp_op_type {
#define IRDMA_FEATURE_TYPE GENMASK_ULL(63, 48)
#define IRDMA_RSVD_S 41
#define IRDMA_RSVD GENMASK_ULL(55, 41)
+#define IRDMA_FEATURE_RSRC_MAX_S 0
+#define IRDMA_FEATURE_RSRC_MAX GENMASK_ULL(31, 0)
#define IRDMAQPSQ_OPCODE_S 32
#define IRDMAQPSQ_OPCODE GENMASK_ULL(37, 32)
@@ -1258,7 +1289,7 @@ enum irdma_cqp_op_type {
#define IRDMAQPSQ_DESTQPN_S 32
#define IRDMAQPSQ_DESTQPN GENMASK_ULL(55, 32)
#define IRDMAQPSQ_AHID_S 0
-#define IRDMAQPSQ_AHID GENMASK_ULL(16, 0)
+#define IRDMAQPSQ_AHID GENMASK_ULL(24, 0)
#define IRDMAQPSQ_INLINEDATAFLAG_S 57
#define IRDMAQPSQ_INLINEDATAFLAG BIT_ULL(57)
@@ -1347,7 +1378,7 @@ enum irdma_cqp_op_type {
#define IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX_S 0
#define IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX GENMASK_ULL(13, 0)
#define IRDMA_QUERY_FPM_MAX_PE_SDS_S 32
-#define IRDMA_QUERY_FPM_MAX_PE_SDS GENMASK_ULL(45, 32)
+#define IRDMA_QUERY_FPM_MAX_PE_SDS GENMASK_ULL(44, 32)
#define IRDMA_QUERY_FPM_MAX_CEQS_S 0
#define IRDMA_QUERY_FPM_MAX_CEQS GENMASK_ULL(9, 0)
@@ -1422,9 +1453,9 @@ enum irdma_cqp_op_type {
#define IRDMA_RING_MOVE_HEAD(_ring, _retcode) \
{ \
u32 size; \
- size = (_ring).size; \
+ size = IRDMA_RING_SIZE(_ring); \
if (!IRDMA_RING_FULL_ERR(_ring)) { \
- (_ring).head = ((_ring).head + 1) % size; \
+ IRDMA_RING_CURRENT_HEAD(_ring) = (IRDMA_RING_CURRENT_HEAD(_ring) + 1) % size; \
(_retcode) = 0; \
} else { \
(_retcode) = -ENOSPC; \
@@ -1433,79 +1464,40 @@ enum irdma_cqp_op_type {
#define IRDMA_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
{ \
u32 size; \
- size = (_ring).size; \
+ size = IRDMA_RING_SIZE(_ring); \
if ((IRDMA_RING_USED_QUANTA(_ring) + (_count)) < size) { \
- (_ring).head = ((_ring).head + (_count)) % size; \
- (_retcode) = 0; \
- } else { \
- (_retcode) = -ENOSPC; \
- } \
- }
-#define IRDMA_SQ_RING_MOVE_HEAD(_ring, _retcode) \
- { \
- u32 size; \
- size = (_ring).size; \
- if (!IRDMA_SQ_RING_FULL_ERR(_ring)) { \
- (_ring).head = ((_ring).head + 1) % size; \
- (_retcode) = 0; \
- } else { \
- (_retcode) = -ENOSPC; \
- } \
- }
-#define IRDMA_SQ_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
- { \
- u32 size; \
- size = (_ring).size; \
- if ((IRDMA_RING_USED_QUANTA(_ring) + (_count)) < (size - 256)) { \
- (_ring).head = ((_ring).head + (_count)) % size; \
+ IRDMA_RING_CURRENT_HEAD(_ring) = (IRDMA_RING_CURRENT_HEAD(_ring) + (_count)) % size; \
(_retcode) = 0; \
} else { \
(_retcode) = -ENOSPC; \
} \
}
-#define IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(_ring, _count) \
- (_ring).head = ((_ring).head + (_count)) % (_ring).size
-#define IRDMA_RING_MOVE_TAIL(_ring) \
- (_ring).tail = ((_ring).tail + 1) % (_ring).size
+#define IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(_ring, _count) \
+ (IRDMA_RING_CURRENT_HEAD(_ring) = (IRDMA_RING_CURRENT_HEAD(_ring) + (_count)) % IRDMA_RING_SIZE(_ring))
#define IRDMA_RING_MOVE_HEAD_NOCHECK(_ring) \
- (_ring).head = ((_ring).head + 1) % (_ring).size
+ IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(_ring, 1)
#define IRDMA_RING_MOVE_TAIL_BY_COUNT(_ring, _count) \
- (_ring).tail = ((_ring).tail + (_count)) % (_ring).size
+ IRDMA_RING_CURRENT_TAIL(_ring) = (IRDMA_RING_CURRENT_TAIL(_ring) + (_count)) % IRDMA_RING_SIZE(_ring)
+
+#define IRDMA_RING_MOVE_TAIL(_ring) \
+ IRDMA_RING_MOVE_TAIL_BY_COUNT(_ring, 1)
#define IRDMA_RING_SET_TAIL(_ring, _pos) \
- (_ring).tail = (_pos) % (_ring).size
+ WRITE_ONCE(IRDMA_RING_CURRENT_TAIL(_ring), (_pos) % IRDMA_RING_SIZE(_ring))
#define IRDMA_RING_FULL_ERR(_ring) \
( \
- (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 1)) \
- )
-
-#define IRDMA_ERR_RING_FULL2(_ring) \
- ( \
- (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 2)) \
- )
-
-#define IRDMA_ERR_RING_FULL3(_ring) \
- ( \
- (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 3)) \
+ (IRDMA_RING_USED_QUANTA(_ring) == (IRDMA_RING_SIZE(_ring) - 1)) \
)
#define IRDMA_SQ_RING_FULL_ERR(_ring) \
( \
- (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 257)) \
+ (IRDMA_RING_USED_QUANTA(_ring) == (IRDMA_RING_SIZE(_ring) - 257)) \
)
-#define IRDMA_ERR_SQ_RING_FULL2(_ring) \
- ( \
- (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 258)) \
- )
-#define IRDMA_ERR_SQ_RING_FULL3(_ring) \
- ( \
- (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 259)) \
- )
#define IRDMA_RING_MORE_WORK(_ring) \
( \
(IRDMA_RING_USED_QUANTA(_ring) != 0) \
@@ -1513,17 +1505,17 @@ enum irdma_cqp_op_type {
#define IRDMA_RING_USED_QUANTA(_ring) \
( \
- (((_ring).head + (_ring).size - (_ring).tail) % (_ring).size) \
+ ((READ_ONCE(IRDMA_RING_CURRENT_HEAD(_ring)) + IRDMA_RING_SIZE(_ring) - READ_ONCE(IRDMA_RING_CURRENT_TAIL(_ring))) % IRDMA_RING_SIZE(_ring)) \
)
#define IRDMA_RING_FREE_QUANTA(_ring) \
( \
- ((_ring).size - IRDMA_RING_USED_QUANTA(_ring) - 1) \
+ (IRDMA_RING_SIZE(_ring) - IRDMA_RING_USED_QUANTA(_ring) - 1) \
)
#define IRDMA_SQ_RING_FREE_QUANTA(_ring) \
( \
- ((_ring).size - IRDMA_RING_USED_QUANTA(_ring) - 257) \
+ (IRDMA_RING_SIZE(_ring) - IRDMA_RING_USED_QUANTA(_ring) - 257) \
)
#define IRDMA_ATOMIC_RING_MOVE_HEAD(_ring, index, _retcode) \
diff --git a/sys/dev/irdma/irdma_hmc.c b/sys/dev/irdma/irdma_hmc.c
index a3c47c8b1434..35c9373b9d86 100644
--- a/sys/dev/irdma/irdma_hmc.c
+++ b/sys/dev/irdma/irdma_hmc.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2023 Intel Corporation
+ * Copyright (c) 2015 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -271,12 +271,18 @@ irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
bool pd_error = false;
int ret_code = 0;
- if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ irdma_debug(dev, IRDMA_DEBUG_ERR,
+ "invalid hmc obj type %u, start = %u, req cnt %u, cnt = %u\n",
+ info->rsrc_type, info->start_idx, info->count,
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt);
+
return -EINVAL;
+ }
if ((info->start_idx + info->count) >
info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
- irdma_debug(dev, IRDMA_DEBUG_HMC,
+ irdma_debug(dev, IRDMA_DEBUG_ERR,
"error type %u, start = %u, req cnt %u, cnt = %u\n",
info->rsrc_type, info->start_idx, info->count,
info->hmc_info->hmc_obj[info->rsrc_type].cnt);
diff --git a/sys/dev/irdma/irdma_hw.c b/sys/dev/irdma/irdma_hw.c
index 64c05b8663e0..9078a5a19b86 100644
--- a/sys/dev/irdma/irdma_hw.c
+++ b/sys/dev/irdma/irdma_hw.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2025 Intel Corporation
+ * Copyright (c) 2015 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -147,8 +147,9 @@ irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq)
sc_ceq = &ceq->sc_ceq;
do {
spin_lock_irqsave(&ceq->ce_lock, flags);
+
cq = irdma_sc_process_ceq(dev, sc_ceq);
- if (!cq) {
+ if (!cq || rf->reset) {
spin_unlock_irqrestore(&ceq->ce_lock, flags);
break;
}
@@ -203,8 +204,10 @@ irdma_complete_cqp_request(struct irdma_cqp *cqp,
/**
* irdma_process_aeq - handle aeq events
* @rf: RDMA PCI function
+ *
+ * Return: True if an AE was processed.
*/
-static void
+static bool
irdma_process_aeq(struct irdma_pci_f *rf)
{
struct irdma_sc_dev *dev = &rf->sc_dev;
@@ -219,11 +222,10 @@ irdma_process_aeq(struct irdma_pci_f *rf)
struct irdma_device *iwdev = rf->iwdev;
struct irdma_qp_host_ctx_info *ctx_info = NULL;
unsigned long flags;
-
u32 aeqcnt = 0;
if (!sc_aeq->size)
- return;
+ return false;
do {
memset(info, 0, sizeof(*info));
@@ -231,7 +233,16 @@ irdma_process_aeq(struct irdma_pci_f *rf)
if (ret)
break;
+ if (info->aeqe_overflow) {
+ irdma_dev_err(&iwdev->ibdev, "AEQ has overflowed\n");
+ rf->reset = true;
+ rf->gen_ops.request_reset(rf);
+ return (aeqcnt > 0);
+ }
+
aeqcnt++;
+ atomic_inc(&iwdev->ae_info.ae_cnt);
+
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_AEQ,
"ae_id = 0x%x (%s), is_qp = %d, qp_id = %d, tcp_state = %d, iwarp_state = %d, ae_src = %d\n",
info->ae_id, irdma_get_ae_desc(info->ae_id),
@@ -265,8 +276,12 @@ irdma_process_aeq(struct irdma_pci_f *rf)
spin_lock_irqsave(&iwqp->lock, flags);
iwqp->hw_tcp_state = info->tcp_state;
iwqp->hw_iwarp_state = info->iwarp_state;
- if (info->ae_id != IRDMA_AE_QP_SUSPEND_COMPLETE)
+
+ if (info->ae_id != IRDMA_AE_QP_SUSPEND_COMPLETE) {
iwqp->last_aeq = info->ae_id;
+ iwqp->ae_src = info->ae_src;
+ }
+
spin_unlock_irqrestore(&iwqp->lock, flags);
ctx_info = &iwqp->ctx_info;
} else {
@@ -397,10 +412,6 @@ irdma_process_aeq(struct irdma_pci_f *rf)
case IRDMA_AE_LLP_TOO_MANY_RNRS:
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
default:
- irdma_dev_err(&iwdev->ibdev,
- "AEQ: abnormal ae_id = 0x%x (%s), is_qp = %d, qp_id = %d, ae_source = %d\n",
- info->ae_id, irdma_get_ae_desc(info->ae_id),
- info->qp, info->qp_cq_id, info->ae_src);
if (rdma_protocol_roce(&iwqp->iwdev->ibdev, 1)) {
ctx_info->roce_info->err_rq_idx_valid = info->err_rq_idx_valid;
if (info->rq) {
@@ -435,6 +446,8 @@ irdma_process_aeq(struct irdma_pci_f *rf)
if (aeqcnt)
irdma_sc_repost_aeq_entries(dev, aeqcnt);
+
+ return (aeqcnt > 0);
}
/**
@@ -449,11 +462,11 @@ irdma_ena_intr(struct irdma_sc_dev *dev, u32 msix_id)
}
/**
- * irdma_dpc - tasklet for aeq and ceq 0
+ * irdma_aeq_ceq0_tasklet_cb - tasklet for aeq and ceq 0
* @t: tasklet_struct ptr
*/
static void
-irdma_dpc(unsigned long t)
+irdma_aeq_ceq0_tasklet_cb(unsigned long t)
{
struct irdma_pci_f *rf = from_tasklet(rf, (struct tasklet_struct *)t,
dpc_tasklet);
@@ -465,11 +478,11 @@ irdma_dpc(unsigned long t)
}
/**
- * irdma_ceq_dpc - dpc handler for CEQ
+ * irdma_ceq_tasklet_cb - tasklet handler for CEQ
* @t: tasklet_struct ptr
*/
static void
-irdma_ceq_dpc(unsigned long t)
+irdma_ceq_tasklet_cb(unsigned long t)
{
struct irdma_ceq *iwceq = from_tasklet(iwceq, (struct tasklet_struct *)t,
dpc_tasklet);
@@ -502,7 +515,7 @@ irdma_save_msix_info(struct irdma_pci_f *rf)
size = sizeof(struct irdma_msix_vector) * rf->msix_count;
size += sizeof(*iw_qvlist);
- size += sizeof(*iw_qvinfo) * rf->msix_count - 1;
+ size += sizeof(*iw_qvinfo) * (rf->msix_count - 1);
rf->iw_msixtbl = kzalloc(size, GFP_KERNEL);
if (!rf->iw_msixtbl)
return -ENOMEM;
@@ -538,11 +551,11 @@ irdma_save_msix_info(struct irdma_pci_f *rf)
}
/**
- * irdma_irq_handler - interrupt handler for aeq and ceq0
+ * irdma_aeq_ceq0_irq_handler - interrupt handler for aeq and ceq0
* @data: RDMA PCI function
*/
static void
-irdma_irq_handler(void *data)
+irdma_aeq_ceq0_irq_handler(void *data)
{
struct irdma_pci_f *rf = data;
@@ -550,11 +563,11 @@ irdma_irq_handler(void *data)
}
/**
- * irdma_ceq_handler - interrupt handler for ceq
+ * irdma_ceq_irq_handler - interrupt handler for ceq
* @data: ceq pointer
*/
static void
-irdma_ceq_handler(void *data)
+irdma_ceq_irq_handler(void *data)
{
struct irdma_ceq *iwceq = data;
@@ -625,8 +638,6 @@ irdma_destroy_cqp(struct irdma_pci_f *rf, bool free_hwcqp)
struct irdma_cqp *cqp = &rf->cqp;
int status = 0;
- if (rf->cqp_cmpl_wq)
- destroy_workqueue(rf->cqp_cmpl_wq);
status = irdma_sc_cqp_destroy(dev->cqp, free_hwcqp);
if (status)
irdma_debug(dev, IRDMA_DEBUG_ERR, "Destroy CQP failed %d\n", status);
@@ -794,6 +805,8 @@ irdma_destroy_ccq(struct irdma_pci_f *rf)
struct irdma_ccq *ccq = &rf->ccq;
int status = 0;
+ if (rf->cqp_cmpl_wq)
+ destroy_workqueue(rf->cqp_cmpl_wq);
if (!rf->reset)
status = irdma_sc_ccq_destroy(dev->ccq, 0, true);
if (status)
@@ -964,7 +977,7 @@ irdma_obj_aligned_mem(struct irdma_pci_f *rf,
static int
irdma_create_cqp(struct irdma_pci_f *rf)
{
- u32 sqsize = IRDMA_CQP_SW_SQSIZE_2048;
+ u32 sqsize = IRDMA_CQP_SW_SQSIZE_MAX;
struct irdma_dma_mem mem;
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_cqp_init_info cqp_init_info = {0};
@@ -1012,6 +1025,7 @@ irdma_create_cqp(struct irdma_pci_f *rf)
cqp_init_info.scratch_array = cqp->scratch_array;
cqp_init_info.protocol_used = rf->protocol_used;
cqp_init_info.en_rem_endpoint_trk = rf->en_rem_endpoint_trk;
+ cqp_init_info.timer_slots = rf->timer_slots;
memcpy(&cqp_init_info.dcqcn_params, &rf->dcqcn_params,
sizeof(cqp_init_info.dcqcn_params));
@@ -1077,12 +1091,13 @@ irdma_create_ccq(struct irdma_pci_f *rf)
struct irdma_ccq_init_info info = {0};
struct irdma_ccq *ccq = &rf->ccq;
int status;
+ int ccq_size = IW_CCQ_SIZE;
dev->ccq = &ccq->sc_cq;
dev->ccq->dev = dev;
info.dev = dev;
ccq->shadow_area.size = sizeof(struct irdma_cq_shadow_area);
- ccq->mem_cq.size = sizeof(struct irdma_cqe) * IW_CCQ_SIZE;
+ ccq->mem_cq.size = sizeof(struct irdma_cqe) * ccq_size;
ccq->mem_cq.va = irdma_allocate_dma_mem(dev->hw, &ccq->mem_cq,
ccq->mem_cq.size,
IRDMA_CQ0_ALIGNMENT);
@@ -1099,7 +1114,7 @@ irdma_create_ccq(struct irdma_pci_f *rf)
/* populate the ccq init info */
info.cq_base = ccq->mem_cq.va;
info.cq_pa = ccq->mem_cq.pa;
- info.num_elem = IW_CCQ_SIZE;
+ info.num_elem = ccq_size;
info.shadow_area = ccq->shadow_area.va;
info.shadow_area_pa = ccq->shadow_area.pa;
info.ceqe_mask = false;
@@ -1205,8 +1220,8 @@ irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
if (rf->msix_shared && !ceq_id) {
snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
"irdma-%s-AEQCEQ-0", dev_name(&rf->pcidev->dev));
- tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
- status = irdma_irq_request(rf, msix_vec, irdma_irq_handler, rf);
+ tasklet_setup(&rf->dpc_tasklet, irdma_aeq_ceq0_tasklet_cb);
+ status = irdma_irq_request(rf, msix_vec, irdma_aeq_ceq0_irq_handler, rf);
if (status)
return status;
bus_describe_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->tag, "%s", msix_vec->name);
@@ -1214,9 +1229,9 @@ irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
"irdma-%s-CEQ-%d",
dev_name(&rf->pcidev->dev), ceq_id);
- tasklet_setup(&iwceq->dpc_tasklet, irdma_ceq_dpc);
+ tasklet_setup(&iwceq->dpc_tasklet, irdma_ceq_tasklet_cb);
- status = irdma_irq_request(rf, msix_vec, irdma_ceq_handler, iwceq);
+ status = irdma_irq_request(rf, msix_vec, irdma_ceq_irq_handler, iwceq);
if (status)
return status;
bus_describe_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->tag, "%s", msix_vec->name);
@@ -1243,8 +1258,8 @@ irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
if (!rf->msix_shared) {
snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
"irdma-%s-AEQ", dev_name(&rf->pcidev->dev));
- tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
- status = irdma_irq_request(rf, msix_vec, irdma_irq_handler, rf);
+ tasklet_setup(&rf->dpc_tasklet, irdma_aeq_ceq0_tasklet_cb);
+ status = irdma_irq_request(rf, msix_vec, irdma_aeq_ceq0_irq_handler, rf);
if (status)
return status;
bus_describe_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->tag, "%s", msix_vec->name);
@@ -1277,7 +1292,6 @@ irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
int status;
struct irdma_ceq_init_info info = {0};
struct irdma_sc_dev *dev = &rf->sc_dev;
- u64 scratch;
u32 ceq_size;
info.ceq_id = ceq_id;
@@ -1300,14 +1314,13 @@ irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
iwceq->sc_ceq.ceq_id = ceq_id;
info.dev = dev;
info.vsi = vsi;
- scratch = (uintptr_t)&rf->cqp.sc_cqp;
status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
if (!status) {
if (dev->ceq_valid)
status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
IRDMA_OP_CEQ_CREATE);
else
- status = irdma_sc_cceq_create(&iwceq->sc_ceq, scratch);
+ status = irdma_sc_cceq_create(&iwceq->sc_ceq);
}
if (status) {
@@ -1576,7 +1589,7 @@ irdma_initialize_ilq(struct irdma_device *iwdev)
info.buf_size = 1024;
info.tx_buf_cnt = 2 * info.sq_size;
info.receive = irdma_receive_ilq;
- info.xmit_complete = irdma_free_sqbuf;
+ info.xmit_complete = irdma_cm_ilq_cmpl_handler;
status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
if (status)
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_ERR, "ilq create fail\n");
@@ -1827,6 +1840,16 @@ irdma_setup_init_state(struct irdma_pci_f *rf)
if (status)
goto clean_obj_mem;
+ /*
+ * Apply sysctl settings to max_hw_ird/ord
+ */
+ rf->sc_dev.hw_attrs.max_hw_ird = irdma_sysctl_max_ird;
+ rf->sc_dev.hw_attrs.max_hw_ord = irdma_sysctl_max_ord;
+ irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT,
+ "using max_hw_ird = %d and max_hw_ord = %d\n",
+ rf->sc_dev.hw_attrs.max_hw_ird,
+ rf->sc_dev.hw_attrs.max_hw_ord);
+
return 0;
clean_obj_mem:
@@ -2060,7 +2083,7 @@ irdma_ctrl_init_hw(struct irdma_pci_f *rf)
break;
rf->init_state = CEQ0_CREATED;
/* Handles processing of CQP completions */
- rf->cqp_cmpl_wq = alloc_ordered_workqueue("cqp_cmpl_wq",
+ rf->cqp_cmpl_wq = alloc_ordered_workqueue("irdma-cqp_cmpl_wq",
WQ_HIGHPRI | WQ_UNBOUND);
if (!rf->cqp_cmpl_wq) {
status = -ENOMEM;
@@ -2373,6 +2396,7 @@ irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx)
cqp_info->cqp_cmd = IRDMA_OP_ADD_LOCAL_MAC_ENTRY;
cqp_info->in.u.add_local_mac_entry.cqp = &iwcqp->sc_cqp;
cqp_info->in.u.add_local_mac_entry.scratch = (uintptr_t)cqp_request;
+ cqp_info->create = true;
status = irdma_handle_cqp_op(rf, cqp_request);
irdma_put_cqp_request(iwcqp, cqp_request);
@@ -2406,6 +2430,8 @@ irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx)
cqp_info->post_sq = 1;
cqp_info->in.u.alloc_local_mac_entry.cqp = &iwcqp->sc_cqp;
cqp_info->in.u.alloc_local_mac_entry.scratch = (uintptr_t)cqp_request;
+ cqp_info->create = true;
+
status = irdma_handle_cqp_op(rf, cqp_request);
if (!status)
*mac_tbl_idx = (u16)cqp_request->compl_info.op_ret_val;
@@ -2436,7 +2462,6 @@ irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev,
cqp_info = &cqp_request->info;
info = &cqp_info->in.u.manage_apbvt_entry.info;
- memset(info, 0, sizeof(*info));
info->add = add_port;
info->port = accel_local_port;
cqp_info->cqp_cmd = IRDMA_OP_MANAGE_APBVT_ENTRY;
@@ -2519,35 +2544,22 @@ irdma_del_apbvt(struct irdma_device *iwdev,
spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
}
-/**
- * irdma_manage_arp_cache - manage hw arp cache
- * @rf: RDMA PCI function
- * @mac_addr: mac address ptr
- * @ip_addr: ip addr for arp cache
- * @action: add, delete or modify
- */
void
-irdma_manage_arp_cache(struct irdma_pci_f *rf, const unsigned char *mac_addr,
- u32 *ip_addr, u32 action)
+irdma_arp_cqp_op(struct irdma_pci_f *rf, u16 arp_index,
+ const unsigned char *mac_addr, u32 action)
{
struct irdma_add_arp_cache_entry_info *info;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
- int arp_index;
-
- arp_index = irdma_arp_table(rf, ip_addr, mac_addr, action);
- if (arp_index == -1)
- return;
cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
if (!cqp_request)
return;
cqp_info = &cqp_request->info;
- if (action == IRDMA_ARP_ADD) {
+ if (action == IRDMA_ARP_ADD_UPDATE) {
cqp_info->cqp_cmd = IRDMA_OP_ADD_ARP_CACHE_ENTRY;
info = &cqp_info->in.u.add_arp_cache_entry.info;
- memset(info, 0, sizeof(*info));
info->arp_index = (u16)arp_index;
info->permanent = true;
ether_addr_copy(info->mac_addr, mac_addr);
@@ -2568,6 +2580,26 @@ irdma_manage_arp_cache(struct irdma_pci_f *rf, const unsigned char *mac_addr,
}
/**
+ * irdma_manage_arp_cache - manage hw arp cache
+ * @rf: RDMA PCI function
+ * @mac_addr: mac address ptr
+ * @ip_addr: ip addr for arp cache
+ * @action: add, delete or modify
+ */
+void
+irdma_manage_arp_cache(struct irdma_pci_f *rf, const unsigned char *mac_addr,
+ u32 *ip_addr, u32 action)
+{
+ int arp_index;
+
+ arp_index = irdma_arp_table(rf, ip_addr, mac_addr, action);
+ if (arp_index == -1)
+ return;
+
+ irdma_arp_cqp_op(rf, (u16)arp_index, mac_addr, action);
+}
+
+/**
* irdma_send_syn_cqp_callback - do syn/ack after qhash
* @cqp_request: qhash cqp completion
*/
@@ -2577,7 +2609,7 @@ irdma_send_syn_cqp_callback(struct irdma_cqp_request *cqp_request)
struct irdma_cm_node *cm_node = cqp_request->param;
irdma_send_syn(cm_node, 1);
- irdma_rem_ref_cm_node(cm_node);
+ irdma_rem_ref_cmnode(cm_node);
}
/**
@@ -2598,7 +2630,6 @@ irdma_qhash_info_prepare(struct irdma_device *iwdev,
struct irdma_qhash_table_info *info;
info = &cqp_info->in.u.manage_qhash_table_entry.info;
- memset(info, 0, sizeof(*info));
info->vsi = &iwdev->vsi;
info->manage = mtype;
info->entry_type = etype;
@@ -2731,7 +2762,7 @@ irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
cqp_request->callback_fcn = irdma_send_syn_cqp_callback;
cqp_request->param = cmnode;
if (!wait)
- atomic_inc(&cm_node->refcnt);
+ irdma_add_ref_cmnode(cm_node);
}
if (info->ipv4_valid)
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
@@ -2759,7 +2790,7 @@ irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
if (status && cm_node && !wait)
- irdma_rem_ref_cm_node(cm_node);
+ irdma_rem_ref_cmnode(cm_node);
irdma_put_cqp_request(iwcqp, cqp_request);
@@ -2873,6 +2904,9 @@ irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
if (!(flush_mask & IRDMA_FLUSH_SQ) && !(flush_mask & IRDMA_FLUSH_RQ))
return;
+ if (atomic_cmpxchg(&iwqp->flush_issued, 0, 1))
+ return;
+
/* Set flush info fields */
info.sq = flush_mask & IRDMA_FLUSH_SQ;
info.rq = flush_mask & IRDMA_FLUSH_RQ;
@@ -2896,7 +2930,9 @@ irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
if (info.rq && iwqp->sc_qp.rq_flush_code)
info.rq_minor_code = flush_code;
}
- if (irdma_upload_context && irdma_upload_qp_context(iwqp, 0, 1))
+ if (irdma_upload_context &&
+ irdma_upload_qp_context(rf, iwqp->sc_qp.qp_uk.qp_id,
+ iwqp->sc_qp.qp_uk.qp_type, 0, 1))
irdma_dev_warn(&iwqp->iwdev->ibdev, "failed to upload QP context\n");
if (!iwqp->user_mode)
irdma_sched_qp_flush_work(iwqp);
@@ -2905,5 +2941,4 @@ irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
/* Issue flush */
(void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info,
flush_mask & IRDMA_FLUSH_WAIT);
- iwqp->flush_issued = true;
}
diff --git a/sys/dev/irdma/irdma_kcompat.c b/sys/dev/irdma/irdma_kcompat.c
index 4261fb45d390..51f44133252c 100644
--- a/sys/dev/irdma/irdma_kcompat.c
+++ b/sys/dev/irdma/irdma_kcompat.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2018 - 2023 Intel Corporation
+ * Copyright (c) 2018 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -470,25 +470,49 @@ irdma_create_ah_wait(struct irdma_pci_f *rf,
int ret;
if (!sleep) {
- int cnt = rf->sc_dev.hw_attrs.max_cqp_compl_wait_time_ms *
- CQP_TIMEOUT_THRESHOLD;
+ bool timeout = false;
+ u64 start = get_jiffies_64();
+ u64 completed_ops = atomic64_read(&rf->sc_dev.cqp->completed_ops);
struct irdma_cqp_request *cqp_request =
sc_ah->ah_info.cqp_request;
+ const u64 timeout_jiffies =
+ msecs_to_jiffies(rf->sc_dev.hw_attrs.max_cqp_compl_wait_time_ms *
+ CQP_TIMEOUT_THRESHOLD);
+
+ /*
+ * NOTE: irdma_check_cqp_progress is not used here because it relies on a notion of a cycle count, but
+ * we want to avoid unnecessary delays. We are in an atomic context here, so we might as well check in
+ * a tight loop.
+ */
+ while (!READ_ONCE(cqp_request->request_done)) {
+ u64 tmp;
+ u64 curr_jiffies;
- do {
irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
- mdelay(1);
- } while (!READ_ONCE(cqp_request->request_done) && --cnt);
- if (cnt && !cqp_request->compl_info.op_ret_val) {
+ curr_jiffies = get_jiffies_64();
+ tmp = atomic64_read(&rf->sc_dev.cqp->completed_ops);
+ if (tmp != completed_ops) {
+ /* CQP is progressing. Reset timer. */
+ completed_ops = tmp;
+ start = curr_jiffies;
+ }
+
+ if ((curr_jiffies - start) > timeout_jiffies) {
+ timeout = true;
+ break;
+ }
+ }
+
+ if (!timeout && !cqp_request->compl_info.op_ret_val) {
irdma_put_cqp_request(&rf->cqp, cqp_request);
sc_ah->ah_info.ah_valid = true;
} else {
- ret = !cnt ? -ETIMEDOUT : -EINVAL;
+ ret = timeout ? -ETIMEDOUT : -EINVAL;
irdma_dev_err(&rf->iwdev->ibdev, "CQP create AH error ret = %d opt_ret_val = %d",
ret, cqp_request->compl_info.op_ret_val);
irdma_put_cqp_request(&rf->cqp, cqp_request);
- if (!cnt && !rf->reset) {
+ if (timeout && !rf->reset) {
rf->reset = true;
rf->gen_ops.request_reset(rf);
}
@@ -501,19 +525,10 @@ irdma_create_ah_wait(struct irdma_pci_f *rf,
#define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)
-/**
- * irdma_create_ah - create address handle
- * @ib_ah: ptr to AH
- * @attr: address handle attributes
- * @flags: AH flags to wait
- * @udata: user data
- *
- * returns 0 on success, error otherwise
- */
-int
-irdma_create_ah(struct ib_ah *ib_ah,
- struct ib_ah_attr *attr, u32 flags,
- struct ib_udata *udata)
+static int
+irdma_create_sleepable_ah(struct ib_ah *ib_ah,
+ struct ib_ah_attr *attr, u32 flags,
+ struct ib_udata *udata)
{
struct irdma_pd *pd = to_iwpd(ib_ah->pd);
struct irdma_ah *ah = container_of(ib_ah, struct irdma_ah, ibah);
@@ -613,6 +628,23 @@ err_gid_l2:
return err;
}
+/**
+ * irdma_create_ah - create address handle
+ * @ib_ah: ptr to AH
+ * @attr: address handle attributes
+ * @flags: AH flags to wait
+ * @udata: user data
+ *
+ * returns 0 on success, error otherwise
+ */
+int
+irdma_create_ah(struct ib_ah *ib_ah,
+ struct ib_ah_attr *attr, u32 flags,
+ struct ib_udata *udata)
+{
+ return irdma_create_sleepable_ah(ib_ah, attr, flags, udata);
+}
+
void
irdma_ether_copy(u8 *dmac, struct ib_ah_attr *attr)
{
@@ -691,6 +723,7 @@ irdma_create_qp(struct ib_pd *ibpd,
struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
struct irdma_qp_init_info init_info = {{0}};
struct irdma_qp_host_ctx_info *ctx_info;
+ u32 next_qp = 0;
unsigned long flags;
err_code = irdma_validate_qp_attrs(init_attr, iwdev);
@@ -743,6 +776,9 @@ irdma_create_qp(struct ib_pd *ibpd,
if (init_attr->qp_type == IB_QPT_GSI)
qp_num = 1;
+ else if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
+ err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
+ &qp_num, &next_qp);
else
err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
&qp_num, &rf->next_qp);
@@ -759,7 +795,7 @@ irdma_create_qp(struct ib_pd *ibpd,
iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
init_info.pd = &iwpd->sc_pd;
- init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
+ init_info.qp_uk_init_info.qp_id = qp_num;
if (!rdma_protocol_roce(&iwdev->ibdev, 1))
init_info.qp_uk_init_info.first_sq_wq = 1;
iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
@@ -769,10 +805,11 @@ irdma_create_qp(struct ib_pd *ibpd,
spin_lock_init(&iwqp->dwork_flush_lock);
if (udata) {
+ INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_user_flush_worker);
init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info, init_attr);
} else {
- INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
+ INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_kern_flush_worker);
init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
}
@@ -946,7 +983,6 @@ irdma_create_cq(struct ib_cq *ibcq,
unsigned long flags;
int err_code;
int entries = attr->cqe;
- bool cqe_64byte_ena;
err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
if (err_code)
@@ -966,10 +1002,9 @@ irdma_create_cq(struct ib_cq *ibcq,
INIT_LIST_HEAD(&iwcq->resize_list);
INIT_LIST_HEAD(&iwcq->cmpl_generated);
info.dev = dev;
- ukinfo->cq_size = max(entries, 4);
+ ukinfo->cq_size = max_t(int, entries, 4);
ukinfo->cq_id = cq_num;
- cqe_64byte_ena = (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_64_BYTE_CQE) ? true : false;
- ukinfo->avoid_mem_cflct = cqe_64byte_ena;
+ iwcq->cq_num = cq_num;
iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
atomic_set(&iwcq->armed, 0);
if (attr->comp_vector < rf->ceqs_count)
@@ -1004,8 +1039,6 @@ irdma_create_cq(struct ib_cq *ibcq,
err_code = -EPROTO;
goto cq_free_rsrc;
}
- iwcq->iwpbl = iwpbl;
- iwcq->cq_mem_size = 0;
cqmr = &iwpbl->cq_mr;
if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
@@ -1019,7 +1052,6 @@ irdma_create_cq(struct ib_cq *ibcq,
err_code = -EPROTO;
goto cq_free_rsrc;
}
- iwcq->iwpbl_shadow = iwpbl_shadow;
cqmr_shadow = &iwpbl_shadow->cq_mr;
info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
cqmr->split = true;
@@ -1043,14 +1075,11 @@ irdma_create_cq(struct ib_cq *ibcq,
}
entries++;
- if (!cqe_64byte_ena && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2)
entries *= 2;
ukinfo->cq_size = entries;
- if (cqe_64byte_ena)
- rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_extended_cqe);
- else
- rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
+ rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
iwcq->kmem.size = round_up(rsize, IRDMA_HW_PAGE_SIZE);
iwcq->kmem.va = irdma_allocate_dma_mem(dev->hw, &iwcq->kmem,
iwcq->kmem.size, IRDMA_HW_PAGE_SIZE);
@@ -1094,6 +1123,7 @@ irdma_create_cq(struct ib_cq *ibcq,
cqp_info->in.u.cq_create.cq = cq;
cqp_info->in.u.cq_create.check_overflow = true;
cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
+ cqp_info->create = true;
status = irdma_handle_cqp_op(rf, cqp_request);
irdma_put_cqp_request(&rf->cqp, cqp_request);
if (status) {
@@ -1114,7 +1144,7 @@ irdma_create_cq(struct ib_cq *ibcq,
}
}
- rf->cq_table[cq_num] = iwcq;
+ WRITE_ONCE(rf->cq_table[cq_num], iwcq);
init_completion(&iwcq->free_cq);
return 0;
@@ -1227,6 +1257,86 @@ done:
return 0;
}
+/**
+ * irdma_reg_user_mr - Register a user memory region
+ * @pd: ptr of pd
+ * @start: virtual start address
+ * @len: length of mr
+ * @virt: virtual address
+ * @access: access of mr
+ * @udata: user data
+ */
+struct ib_mr *
+irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
+ u64 virt, int access,
+ struct ib_udata *udata)
+{
+#define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
+ struct irdma_device *iwdev = to_iwdev(pd->device);
+ struct irdma_mem_reg_req req = {};
+ struct ib_umem *region;
+ struct irdma_mr *iwmr;
+ int err;
+
+ if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
+ return ERR_PTR(-EINVAL);
+
+ if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN)
+ return ERR_PTR(-EINVAL);
+
+ region = ib_umem_get(pd->uobject->context, start, len, access, 0);
+
+ if (IS_ERR(region)) {
+ irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
+ "Failed to create ib_umem region err=%ld\n",
+ PTR_ERR(region));
+ return (struct ib_mr *)region;
+ }
+
+ if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) {
+ ib_umem_release(region);
+ return ERR_PTR(-EFAULT);
+ }
+
+ iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type);
+ if (IS_ERR(iwmr)) {
+ ib_umem_release(region);
+ return (struct ib_mr *)iwmr;
+ }
+
+ switch (req.reg_type) {
+ case IRDMA_MEMREG_TYPE_QP:
+ err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
+ if (err)
+ goto error;
+
+ break;
+ case IRDMA_MEMREG_TYPE_CQ:
+ err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
+ if (err)
+ goto error;
+
+ break;
+ case IRDMA_MEMREG_TYPE_MEM:
+ err = irdma_reg_user_mr_type_mem(iwmr, access, true);
+ if (err)
+ goto error;
+
+ break;
+ default:
+ err = -EINVAL;
+ goto error;
+ }
+
+ return &iwmr->ibmr;
+
+error:
+ ib_umem_release(region);
+ irdma_free_iwmr(iwmr);
+
+ return ERR_PTR(err);
+}
+
/*
* irdma_rereg_user_mr - Re-Register a user memory region @ibmr: ib mem to access iwarp mr pointer @flags: bit mask to
* indicate which of the attr's of MR modified @start: virtual start address @len: length of mr @virt: virtual address
@@ -1512,20 +1622,19 @@ irdma_query_port(struct ib_device *ibdev, u8 port,
/* no need to zero out pros here. done by caller */
props->max_mtu = IB_MTU_4096;
- props->active_mtu = ib_mtu_int_to_enum(if_getmtu(netdev));
+ props->active_mtu = min(props->max_mtu, iboe_get_mtu(if_getmtu(netdev)));
props->lid = 1;
props->lmc = 0;
props->sm_lid = 0;
props->sm_sl = 0;
- if ((if_getlinkstate(netdev) == LINK_STATE_UP) &&
- (if_getdrvflags(netdev) & IFF_DRV_RUNNING)) {
+ if ((if_getlinkstate(netdev) == LINK_STATE_UP) && (if_getdrvflags(netdev) & IFF_DRV_RUNNING)) {
props->state = IB_PORT_ACTIVE;
props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
} else {
props->state = IB_PORT_DOWN;
props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
}
- ib_get_eth_speed(ibdev, port, &props->active_speed, &props->active_width);
+ ib_get_eth_speed(ibdev, port, (u16 *)& props->active_speed, &props->active_width);
if (rdma_protocol_roce(ibdev, 1)) {
props->gid_tbl_len = 32;
@@ -1720,36 +1829,104 @@ kc_set_rdma_uverbs_cmd_mask(struct irdma_device *iwdev)
iwdev->ibdev.uverbs_ex_cmd_mask |= BIT_ULL(IB_USER_VERBS_EX_CMD_CREATE_CQ);
}
-int
-ib_get_eth_speed(struct ib_device *ibdev, u32 port_num, u8 *speed, u8 *width)
+static void
+ib_get_width_and_speed(u32 netdev_speed, u32 lanes,
+ u16 *speed, u8 *width)
{
- if_t netdev = ibdev->get_netdev(ibdev, port_num);
- u32 netdev_speed;
+ if (!lanes) {
+ if (netdev_speed <= SPEED_1000) {
+ *width = IB_WIDTH_1X;
+ *speed = IB_SPEED_SDR;
+ } else if (netdev_speed <= SPEED_10000) {
+ *width = IB_WIDTH_1X;
+ *speed = IB_SPEED_FDR10;
+ } else if (netdev_speed <= SPEED_20000) {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_DDR;
+ } else if (netdev_speed <= SPEED_25000) {
+ *width = IB_WIDTH_1X;
+ *speed = IB_SPEED_EDR;
+ } else if (netdev_speed <= SPEED_40000) {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_FDR10;
+ } else if (netdev_speed <= SPEED_50000) {
+ *width = IB_WIDTH_2X;
+ *speed = IB_SPEED_EDR;
+ } else if (netdev_speed <= SPEED_100000) {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_EDR;
+ } else if (netdev_speed <= SPEED_200000) {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_HDR;
+ } else {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_NDR;
+ }
- if (!netdev)
- return -ENODEV;
+ return;
+ }
- netdev_speed = if_getbaudrate(netdev);
- dev_put(netdev);
- if (netdev_speed <= SPEED_1000) {
+ switch (lanes) {
+ case 1:
*width = IB_WIDTH_1X;
- *speed = IB_SPEED_SDR;
- } else if (netdev_speed <= SPEED_10000) {
- *width = IB_WIDTH_1X;
- *speed = IB_SPEED_FDR10;
- } else if (netdev_speed <= SPEED_20000) {
+ break;
+ case 2:
+ *width = IB_WIDTH_2X;
+ break;
+ case 4:
*width = IB_WIDTH_4X;
- *speed = IB_SPEED_DDR;
- } else if (netdev_speed <= SPEED_25000) {
+ break;
+ case 8:
+ *width = IB_WIDTH_8X;
+ break;
+ case 12:
+ *width = IB_WIDTH_12X;
+ break;
+ default:
*width = IB_WIDTH_1X;
- *speed = IB_SPEED_EDR;
- } else if (netdev_speed <= SPEED_40000) {
- *width = IB_WIDTH_4X;
+ }
+
+ switch (netdev_speed / lanes) {
+ case SPEED_2500:
+ *speed = IB_SPEED_SDR;
+ break;
+ case SPEED_5000:
+ *speed = IB_SPEED_DDR;
+ break;
+ case SPEED_10000:
*speed = IB_SPEED_FDR10;
- } else {
- *width = IB_WIDTH_4X;
+ break;
+ case SPEED_14000:
+ *speed = IB_SPEED_FDR;
+ break;
+ case SPEED_25000:
*speed = IB_SPEED_EDR;
+ break;
+ case SPEED_50000:
+ *speed = IB_SPEED_HDR;
+ break;
+ case SPEED_100000:
+ *speed = IB_SPEED_NDR;
+ break;
+ default:
+ *speed = IB_SPEED_SDR;
}
+}
+
+int
+ib_get_eth_speed(struct ib_device *ibdev, u32 port_num, u16 *speed, u8 *width)
+{
+ if_t netdev = ibdev->get_netdev(ibdev, port_num);
+ u32 netdev_speed, lanes;
+
+ if (!netdev)
+ return -ENODEV;
+
+ netdev_speed = (u32)if_getbaudrate(netdev);
+ dev_put(netdev);
+ lanes = 0;
+
+ ib_get_width_and_speed(netdev_speed, lanes, speed, width);
return 0;
}
diff --git a/sys/dev/irdma/irdma_main.h b/sys/dev/irdma/irdma_main.h
index 9181f3b70463..1dc455532819 100644
--- a/sys/dev/irdma/irdma_main.h
+++ b/sys/dev/irdma/irdma_main.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2025 Intel Corporation
+ * Copyright (c) 2015 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -74,16 +74,17 @@ extern bool irdma_upload_context;
#define IRDMA_FW_VER_DEFAULT 2
#define IRDMA_HW_VER 2
-#define IRDMA_ARP_ADD 1
+#define IRDMA_ARP_ADD_UPDATE 1
+#define IRDMA_ARP_ADD IRDMA_ARP_ADD_UPDATE
#define IRDMA_ARP_DELETE 2
#define IRDMA_ARP_RESOLVE 3
#define IRDMA_MACIP_ADD 1
#define IRDMA_MACIP_DELETE 2
-#define IW_CCQ_SIZE (IRDMA_CQP_SW_SQSIZE_2048 + 1)
-#define IW_CEQ_SIZE 2048
-#define IW_AEQ_SIZE 2048
+#define IW_CCQ_SIZE (IRDMA_CQP_SW_SQSIZE_MAX + 2)
+#define IW_CEQ_SIZE 2048
+#define IW_AEQ_SIZE 2048
#define RX_BUF_SIZE (1536 + 8)
#define IW_REG0_SIZE (4 * 1024)
@@ -96,6 +97,7 @@ extern bool irdma_upload_context;
#define IRDMA_EVENT_TIMEOUT_MS 5000
#define IRDMA_VCHNL_EVENT_TIMEOUT_MS 10000
+#define IRDMA_RETRY_PRINT_MS 5000
#define IRDMA_RST_TIMEOUT_HZ 4
#define IRDMA_NO_QSET 0xffff
@@ -107,7 +109,6 @@ extern bool irdma_upload_context;
#define IRDMA_CQP_COMPL_SQ_WQE_FLUSHED 3
#define IRDMA_Q_TYPE_PE_AEQ 0x80
-#define IRDMA_Q_INVALID_IDX 0xffff
#define IRDMA_REM_ENDPOINT_TRK_QPID 3
#define IRDMA_DRV_OPT_ENA_MPA_VER_0 0x00000001
@@ -228,6 +229,8 @@ struct irdma_aeq {
struct irdma_arp_entry {
u32 ip_addr[4];
u8 mac_addr[ETHER_ADDR_LEN];
+ atomic_t refcnt;
+ bool delete_pending:1;
};
struct irdma_msix_vector {
@@ -314,6 +317,7 @@ struct irdma_pci_f {
u32 next_ws_node_id;
u32 max_ws_node_id;
u32 limits_sel;
+ u8 timer_slots;
unsigned long *allocated_ws_nodes;
unsigned long *allocated_qps;
unsigned long *allocated_cqs;
@@ -351,8 +355,6 @@ struct irdma_pci_f {
struct msix_entry msix_info;
struct irdma_dma_mem obj_mem;
struct irdma_dma_mem obj_next;
- atomic_t vchnl_msgs;
- wait_queue_head_t vchnl_waitq;
struct workqueue_struct *cqp_cmpl_wq;
struct work_struct cqp_cmpl_work;
struct irdma_sc_vsi default_vsi;
@@ -361,6 +363,15 @@ struct irdma_pci_f {
void (*check_fc)(struct irdma_sc_vsi *vsi, struct irdma_sc_qp *sc_qp);
struct irdma_dcqcn_cc_params dcqcn_params;
struct irdma_device *iwdev;
+ struct delayed_work dwork_cqp_poll;
+ u32 chk_stag;
+};
+
+struct irdma_ae_info {
+ spinlock_t info_lock;
+ atomic_t ae_cnt;
+ u32 retry_cnt;
+ unsigned long retry_delay;
};
struct irdma_device {
@@ -372,6 +383,7 @@ struct irdma_device {
struct workqueue_struct *cleanup_wq;
struct irdma_sc_vsi vsi;
struct irdma_cm_core cm_core;
+ struct irdma_ae_info ae_info;
u32 roce_cwnd;
u32 roce_ackcreds;
u32 vendor_id;
@@ -395,7 +407,6 @@ struct irdma_device {
bool dcb_vlan_mode:1;
bool iw_ooo:1;
enum init_completion_state init_state;
-
wait_queue_head_t suspend_wq;
};
@@ -522,6 +533,8 @@ void irdma_qp_rem_ref(struct ib_qp *ibqp);
void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp);
struct ib_qp *irdma_get_qp(struct ib_device *ibdev, int qpn);
void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask);
+void irdma_arp_cqp_op(struct irdma_pci_f *rf, u16 arp_index,
+ const unsigned char *mac_addr, u32 action);
void irdma_manage_arp_cache(struct irdma_pci_f *rf, const unsigned char *mac_addr,
u32 *ip_addr, u32 action);
struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port);
@@ -555,6 +568,8 @@ void irdma_cq_add_ref(struct ib_cq *ibcq);
void irdma_cq_rem_ref(struct ib_cq *ibcq);
void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);
+void irdma_chk_free_stag(struct irdma_pci_f *rf);
+void cqp_poll_worker(struct work_struct *work);
void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf);
int irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp,
struct irdma_modify_qp_info *info, bool wait);
@@ -565,7 +580,7 @@ int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
bool wait);
int irdma_add_qhash_wait_no_lock(struct irdma_device *iwdev, struct irdma_cm_info *cminfo);
void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf);
-void irdma_free_sqbuf(struct irdma_sc_vsi *vsi, void *bufp);
+void irdma_cm_ilq_cmpl_handler(struct irdma_sc_vsi *vsi, void *bufp);
void irdma_free_qp_rsrc(struct irdma_qp *iwqp);
int irdma_setup_cm_core(struct irdma_device *iwdev, u8 ver);
void irdma_cleanup_cm_core(struct irdma_cm_core *cm_core);
@@ -586,8 +601,9 @@ u16 irdma_get_vlan_ipv4(struct iw_cm_id *cm_id, u32 *addr);
void irdma_get_vlan_mac_ipv6(struct iw_cm_id *cm_id, u32 *addr, u16 *vlan_id,
u8 *mac);
struct ib_mr *irdma_reg_phys_mr(struct ib_pd *ib_pd, u64 addr, u64 size,
- int acc, u64 *iova_start);
-int irdma_upload_qp_context(struct irdma_qp *iwqp, bool freeze, bool raw);
+ int acc, u64 *iova_start, bool dma_mr);
+int irdma_upload_qp_context(struct irdma_pci_f *rf, u32 qpn,
+ u8 qp_type, bool freeze, bool raw);
void irdma_del_hmc_objects(struct irdma_sc_dev *dev,
struct irdma_hmc_info *hmc_info, bool privileged,
bool reset, enum irdma_vers vers);
@@ -597,7 +613,6 @@ int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
void (*callback_fcn)(struct irdma_cqp_request *cqp_request),
void *cb_param);
void irdma_udqp_qs_worker(struct work_struct *work);
-bool irdma_cq_empty(struct irdma_cq *iwcq);
int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
void *ptr);
void irdma_unregister_notifiers(struct irdma_device *iwdev);
diff --git a/sys/dev/irdma/irdma_pble.c b/sys/dev/irdma/irdma_pble.c
index aaf9d8917622..9d6d89b3f881 100644
--- a/sys/dev/irdma/irdma_pble.c
+++ b/sys/dev/irdma/irdma_pble.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2023 Intel Corporation
+ * Copyright (c) 2015 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -108,7 +108,7 @@ static void
get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct sd_pd_idx *idx)
{
- idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
+ idx->sd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE);
idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
}
@@ -545,12 +545,14 @@ void
irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc)
{
- pble_rsrc->freedpbles += palloc->total_cnt;
-
if (palloc->level == PBLE_LEVEL_2)
free_lvl2(pble_rsrc, palloc);
else
irdma_prm_return_pbles(&pble_rsrc->pinfo,
&palloc->level1.chunkinfo);
+
+ mutex_lock(&pble_rsrc->pble_mutex_lock);
+ pble_rsrc->freedpbles += palloc->total_cnt;
pble_rsrc->stats_alloc_freed++;
+ mutex_unlock(&pble_rsrc->pble_mutex_lock);
}
diff --git a/sys/dev/irdma/irdma_protos.h b/sys/dev/irdma/irdma_protos.h
index 0663f9591d52..09a35e5a1f97 100644
--- a/sys/dev/irdma/irdma_protos.h
+++ b/sys/dev/irdma/irdma_protos.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2016 - 2023 Intel Corporation
+ * Copyright (c) 2016 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -52,6 +52,7 @@ int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
bool post_sq);
void irdma_init_config_check(struct irdma_config_check *cc,
u8 traffic_class,
+ u8 prio,
u16 qs_handle);
/* HMC/FPM functions */
int irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u16 hmc_fn_id);
@@ -64,8 +65,6 @@ int irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq,
u8 op);
int irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_aeq *sc_aeq,
u8 op);
-int irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
- struct irdma_stats_inst_info *stats_info);
void irdma_update_stats(struct irdma_dev_hw_stats *hw_stats,
struct irdma_gather_stats *gather_stats,
struct irdma_gather_stats *last_gather_stats,
@@ -115,7 +114,7 @@ int irdma_get_rdma_features(struct irdma_sc_dev *dev);
void free_sd_mem(struct irdma_sc_dev *dev);
int irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
struct cqp_cmds_info *pcmdinfo);
-int irdma_process_bh(struct irdma_sc_dev *dev);
+void irdma_process_bh(struct irdma_sc_dev *dev);
extern void dump_ctx(struct irdma_sc_dev *dev, u32 pf_num, u32 qp_num);
void dumpCSR(struct irdma_sc_dev *dev);
void dumpCSRx(struct irdma_sc_dev *dev);
diff --git a/sys/dev/irdma/irdma_puda.c b/sys/dev/irdma/irdma_puda.c
index 0c5b9c164d76..5dc978259685 100644
--- a/sys/dev/irdma/irdma_puda.c
+++ b/sys/dev/irdma/irdma_puda.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2023 Intel Corporation
+ * Copyright (c) 2015 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -435,6 +435,7 @@ irdma_puda_poll_cmpl(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq,
/* reusing so synch the buffer for CPU use */
dma_sync_single_for_cpu(hw_to_dev(dev->hw), buf->mem.pa, buf->mem.size, DMA_BIDIRECTIONAL);
IRDMA_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
+ buf->queued = false;
rsrc->xmit_complete(rsrc->vsi, buf);
spin_lock_irqsave(&rsrc->bufpool_lock, flags);
rsrc->tx_wqe_avail_cnt++;
@@ -536,7 +537,7 @@ irdma_puda_send(struct irdma_sc_qp *qp, struct irdma_puda_send_info *info)
* @rsrc: resource to use for buffer
* @buf: puda buffer to transmit
*/
-void
+int
irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
struct irdma_puda_buf *buf)
{
@@ -545,17 +546,28 @@ irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
unsigned long flags;
spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+ if (buf) {
+ if (buf->queued) {
+ irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
+ "PUDA: Attempting to re-send queued buf %p\n",
+ buf);
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+ return -EINVAL;
+ }
+
+ buf->queued = true;
+ }
/*
* if no wqe available or not from a completion and we have pending buffers, we must queue new buffer
*/
if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
list_add_tail(&buf->list, &rsrc->txpend);
- spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
rsrc->stats_sent_pkt_q++;
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
"adding to txpend\n");
- return;
+ return 0;
}
rsrc->tx_wqe_avail_cnt--;
/*
@@ -595,6 +607,7 @@ irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
}
done:
spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+ return 0;
}
/**
@@ -737,11 +750,14 @@ irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc)
irdma_qp_add_qos(qp);
irdma_puda_qp_setctx(rsrc);
+ qp->qp_state = IRDMA_QP_STATE_RTS;
+
if (rsrc->dev->ceq_valid)
ret = irdma_cqp_qp_create_cmd(rsrc->dev, qp);
else
ret = irdma_puda_qp_wqe(rsrc->dev, qp);
if (ret) {
+ qp->qp_state = IRDMA_QP_STATE_INVALID;
irdma_qp_rem_qos(qp);
rsrc->dev->ws_remove(qp->vsi, qp->user_pri);
irdma_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
@@ -964,6 +980,7 @@ irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
irdma_free_hash_desc(rsrc->hash_desc);
/* fallthrough */
case PUDA_QP_CREATED:
+ rsrc->qp.qp_state = IRDMA_QP_STATE_INVALID;
irdma_qp_rem_qos(&rsrc->qp);
if (!reset)
diff --git a/sys/dev/irdma/irdma_puda.h b/sys/dev/irdma/irdma_puda.h
index aff435a90ecd..73e5c42f3c09 100644
--- a/sys/dev/irdma/irdma_puda.h
+++ b/sys/dev/irdma/irdma_puda.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2022 Intel Corporation
+ * Copyright (c) 2015 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -94,20 +94,22 @@ struct irdma_puda_buf {
u8 *iph;
u8 *tcph;
u8 *data;
+ u32 seqnum;
+ u32 ah_id;
+ u32 totallen; /* machlen+iphlen+tcphlen+datalen */
u16 datalen;
u16 vlan_id;
u8 tcphlen; /* tcp length in bytes */
u8 maclen; /* mac length in bytes */
- u32 totallen; /* machlen+iphlen+tcphlen+datalen */
- atomic_t refcount;
+ atomic_t pb_refcount;
u8 hdrlen;
bool virtdma:1;
bool ipv4:1;
bool vlan_valid:1;
bool do_lpb:1; /* Loopback buffer */
bool smac_valid:1;
- u32 seqnum;
- u32 ah_id;
+ bool queued:1;
+ struct irdma_sc_ah *ah;
u8 smac[ETHER_ADDR_LEN];
struct irdma_sc_vsi *vsi;
};
@@ -184,7 +186,7 @@ struct irdma_puda_rsrc {
struct irdma_puda_buf *irdma_puda_get_bufpool(struct irdma_puda_rsrc *rsrc);
void irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc,
struct irdma_puda_buf *buf);
-void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
+int irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
struct irdma_puda_buf *buf);
int irdma_puda_send(struct irdma_sc_qp *qp, struct irdma_puda_send_info *info);
int irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
diff --git a/sys/dev/irdma/irdma_type.h b/sys/dev/irdma/irdma_type.h
index 011245f37779..77db328f940b 100644
--- a/sys/dev/irdma/irdma_type.h
+++ b/sys/dev/irdma/irdma_type.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2023 Intel Corporation
+ * Copyright (c) 2015 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -279,6 +279,7 @@ struct irdma_cqp_init_info {
u8 hmc_profile;
u8 ena_vf_count;
u8 ceqs_per_vf;
+ u8 timer_slots;
bool en_datacenter_tcp:1;
bool disable_packed:1;
bool rocev2_rto_policy:1;
@@ -414,6 +415,7 @@ struct irdma_sc_cqp {
u8 ena_vf_count;
u8 timeout_count;
u8 ceqs_per_vf;
+ u8 timer_slots;
bool en_datacenter_tcp:1;
bool disable_packed:1;
bool rocev2_rto_policy:1;
@@ -442,8 +444,8 @@ struct irdma_sc_ceq {
struct irdma_sc_dev *dev;
struct irdma_ceqe *ceqe_base;
void *pbl_list;
- u32 ceq_id;
u32 elem_cnt;
+ u16 ceq_id;
struct irdma_ring ceq_ring;
u8 pbl_chunk_size;
u8 tph_val;
@@ -466,8 +468,8 @@ struct irdma_sc_cq {
struct irdma_sc_vsi *vsi;
void *pbl_list;
void *back_cq;
- u32 ceq_id;
u32 shadow_read_threshold;
+ u16 ceq_id;
u8 pbl_chunk_size;
u8 cq_type;
u8 tph_val;
@@ -505,6 +507,7 @@ struct irdma_sc_qp {
u8 hw_sq_size;
u8 hw_rq_size;
u8 src_mac_addr_idx;
+ bool suspended:1;
bool on_qoslist:1;
bool ieq_pass_thru:1;
bool sq_tph_en:1;
@@ -523,12 +526,6 @@ struct irdma_sc_qp {
struct list_head list;
};
-struct irdma_stats_inst_info {
- u16 hmc_fn_id;
- u16 stats_idx;
- bool use_hmc_fcn_index:1;
-};
-
struct irdma_up_info {
u8 map[8];
u8 cnp_up_override;
@@ -540,6 +537,13 @@ struct irdma_up_info {
#define IRDMA_MAX_WS_NODES 0x3FF
#define IRDMA_WS_NODE_INVALID 0xFFFF
+struct irdma_ws_move_node_info {
+ u16 node_id[16];
+ u8 num_nodes;
+ u8 target_port;
+ bool resume_traffic:1;
+};
+
struct irdma_ws_node_info {
u16 id;
u16 vsi;
@@ -582,6 +586,7 @@ struct irdma_config_check {
bool lfc_set:1;
bool pfc_set:1;
u8 traffic_class;
+ u8 prio;
u16 qs_handle;
};
@@ -599,7 +604,6 @@ struct irdma_sc_vsi {
u32 exception_lan_q;
u16 mtu;
enum irdma_vm_vf_type vm_vf_type;
- bool stats_inst_alloc:1;
bool tc_change_pending:1;
bool mtu_change_pending:1;
struct irdma_vsi_pestat *pestat;
@@ -653,7 +657,6 @@ struct irdma_sc_dev {
u16 num_vfs;
u16 hmc_fn_id;
bool ceq_valid:1;
- u8 pci_rev;
int (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
void (*ws_remove)(struct irdma_sc_vsi *vsi, u8 user_pri);
void (*ws_reset)(struct irdma_sc_vsi *vsi);
@@ -773,7 +776,7 @@ struct irdma_ceq_init_info {
u64 *ceqe_base;
void *pbl_list;
u32 elem_cnt;
- u32 ceq_id;
+ u16 ceq_id;
bool virtual_map:1;
bool tph_en:1;
bool itr_no_expire:1;
@@ -804,8 +807,8 @@ struct irdma_ccq_init_info {
__le64 *shadow_area;
void *pbl_list;
u32 num_elem;
- u32 ceq_id;
u32 shadow_read_threshold;
+ u16 ceq_id;
bool ceqe_mask:1;
bool ceq_id_valid:1;
bool avoid_mem_cflct:1;
@@ -1004,7 +1007,6 @@ struct irdma_allocate_stag_info {
u16 access_rights;
bool remote_access:1;
bool use_hmc_fcn_index:1;
- bool use_pf_rid:1;
bool all_memory:1;
u16 hmc_fcn_index;
};
@@ -1032,7 +1034,6 @@ struct irdma_reg_ns_stag_info {
irdma_stag_key stag_key;
bool use_hmc_fcn_index:1;
u16 hmc_fcn_index;
- bool use_pf_rid:1;
bool all_memory:1;
};
@@ -1056,7 +1057,6 @@ struct irdma_fast_reg_stag_info {
bool push_wqe:1;
bool use_hmc_fcn_index:1;
u16 hmc_fcn_index;
- bool use_pf_rid:1;
bool defer_flag:1;
};
@@ -1065,6 +1065,7 @@ struct irdma_dealloc_stag_info {
u32 pd_id;
bool mr:1;
bool dealloc_pbl:1;
+ bool skip_flush_markers:1;
};
struct irdma_register_shared_stag {
@@ -1102,8 +1103,8 @@ struct irdma_cq_init_info {
struct irdma_sc_dev *dev;
u64 cq_base_pa;
u64 shadow_area_pa;
- u32 ceq_id;
u32 shadow_read_threshold;
+ u16 ceq_id;
u8 pbl_chunk_size;
u32 first_pm_pbl_idx;
bool virtual_map:1;
@@ -1204,7 +1205,7 @@ int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
int irdma_sc_ccq_init(struct irdma_sc_cq *ccq,
struct irdma_ccq_init_info *info);
-int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch);
+int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq);
int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq);
int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq);
@@ -1473,12 +1474,6 @@ struct cqp_info {
struct {
struct irdma_sc_cqp *cqp;
- struct irdma_stats_inst_info info;
- u64 scratch;
- } stats_manage;
-
- struct {
- struct irdma_sc_cqp *cqp;
struct irdma_stats_gather_info info;
u64 scratch;
} stats_gather;
@@ -1491,6 +1486,12 @@ struct cqp_info {
struct {
struct irdma_sc_cqp *cqp;
+ struct irdma_ws_move_node_info info;
+ u64 scratch;
+ } ws_move_node;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
struct irdma_up_info info;
u64 scratch;
} up_map;
@@ -1508,6 +1509,8 @@ struct cqp_cmds_info {
u8 cqp_cmd;
u8 post_sq;
struct cqp_info in;
+ int cqp_cmd_exec_status;
+ bool create;
};
__le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
diff --git a/sys/dev/irdma/irdma_uda.h b/sys/dev/irdma/irdma_uda.h
index 9850f986ee67..d21b811844af 100644
--- a/sys/dev/irdma/irdma_uda.h
+++ b/sys/dev/irdma/irdma_uda.h
@@ -44,6 +44,7 @@ struct irdma_sc_cqp;
struct irdma_ah_info {
struct irdma_sc_vsi *vsi;
struct irdma_cqp_request *cqp_request;
+ atomic_t ah_refcnt;
u32 pd_idx;
u32 dst_arpindex;
u32 dest_ip_addr[4];
@@ -63,6 +64,7 @@ struct irdma_ah_info {
struct irdma_sc_ah {
struct irdma_sc_dev *dev;
struct irdma_ah_info ah_info;
+ struct work_struct ah_free_work;
};
int irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
diff --git a/sys/dev/irdma/irdma_uda_d.h b/sys/dev/irdma/irdma_uda_d.h
index bbf66bd8680d..1907abcecf12 100644
--- a/sys/dev/irdma/irdma_uda_d.h
+++ b/sys/dev/irdma/irdma_uda_d.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2016 - 2021 Intel Corporation
+ * Copyright (c) 2016 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -183,6 +183,8 @@
#define IRDMA_UDA_CQPSQ_MAV_WQEVALID BIT_ULL(63)
#define IRDMA_UDA_CQPSQ_MAV_OPCODE_S 32
#define IRDMA_UDA_CQPSQ_MAV_OPCODE GENMASK_ULL(37, 32)
+#define IRDMA_UDA_CQPSQ_MAV_TYPE_S 53
+#define IRDMA_UDA_CQPSQ_MAV_TYPE GENMASK_ULL(54, 53)
#define IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK_S 62
#define IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK BIT_ULL(62)
#define IRDMA_UDA_CQPSQ_MAV_IPV4VALID_S 59
diff --git a/sys/dev/irdma/irdma_uk.c b/sys/dev/irdma/irdma_uk.c
index 6c2e2dfb0031..cbe80fc59722 100644
--- a/sys/dev/irdma/irdma_uk.c
+++ b/sys/dev/irdma/irdma_uk.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2023 Intel Corporation
+ * Copyright (c) 2015 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -133,16 +133,18 @@ irdma_nop_1(struct irdma_qp_uk *qp)
void
irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
{
- __le64 *wqe;
+ struct irdma_qp_quanta *sq;
u32 wqe_idx;
if (!(qp_wqe_idx & 0x7F)) {
wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
- wqe = qp->sq_base[wqe_idx].elem;
+ sq = qp->sq_base + wqe_idx;
if (wqe_idx)
- memset(wqe, qp->swqe_polarity ? 0 : 0xFF, 0x1000);
+ memset(sq, qp->swqe_polarity ? 0 : 0xFF,
+ 128 * sizeof(*sq));
else
- memset(wqe, qp->swqe_polarity ? 0xFF : 0, 0x1000);
+ memset(sq, qp->swqe_polarity ? 0xFF : 0,
+ 128 * sizeof(*sq));
}
}
@@ -200,22 +202,65 @@ irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
qp->push_dropped = false;
}
+/**
+ * irdma_qp_push_wqe - setup push wqe and ring db
+ * @qp: hw qp ptr
+ * @wqe: wqe ptr
+ * @quanta: numbers of quanta in wqe
+ * @wqe_idx: wqe index
+ * @push_wqe: whether to use push mode for the wqe
+ */
void
irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 * wqe, u16 quanta,
- u32 wqe_idx, bool post_sq)
+ u32 wqe_idx, bool push_wqe)
{
__le64 *push;
- if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
- IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
- !qp->push_mode) {
- irdma_uk_qp_post_wr(qp);
- } else {
+ if (push_wqe) {
push = (__le64 *) ((uintptr_t)qp->push_wqe +
(wqe_idx & 0x7) * 0x20);
irdma_memcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE);
irdma_qp_ring_push_db(qp, wqe_idx);
+ qp->last_push_db = true;
+ } else if (qp->last_push_db) {
+ qp->last_push_db = false;
+ db_wr32(qp->qp_id, qp->wqe_alloc_db);
+ } else {
+ irdma_uk_qp_post_wr(qp);
+ }
+}
+
+/**
+ * irdma_push_ring_free - check if the sq ring is free to post a push wqe
+ * @qp: hw qp ptr
+ */
+static inline bool
+irdma_push_ring_free(struct irdma_qp_uk *qp)
+{
+ u32 head, tail;
+
+ head = IRDMA_RING_CURRENT_HEAD(qp->initial_ring);
+ tail = IRDMA_RING_CURRENT_TAIL(qp->sq_ring);
+
+ if (head == tail || head == (tail + 1))
+ return true;
+
+ return false;
+}
+
+/**
+ * irdma_enable_push_wqe - decide whether to enable push wqe based on sq ring state and total size
+ * @qp: hw qp ptr
+ * @total_size: total data size
+ */
+static inline bool
+irdma_enable_push_wqe(struct irdma_qp_uk *qp, u32 total_size)
+{
+ if (irdma_push_ring_free(qp) &&
+ total_size <= qp->uk_attrs->max_hw_push_len) {
+ return true;
}
+ return false;
}
/**
@@ -234,7 +279,8 @@ irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
__le64 *wqe;
__le64 *wqe_0 = NULL;
u32 nop_wqe_idx;
- u16 avail_quanta, wqe_quanta = *quanta;
+ u16 wqe_quanta = *quanta;
+ u16 avail_quanta;
u16 i;
avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
@@ -330,7 +376,7 @@ irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
bool read_fence = false;
u16 quanta;
- info->push_wqe = qp->push_db ? true : false;
+ info->push_wqe = false;
op_info = &info->op.rdma_write;
if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
@@ -350,11 +396,13 @@ irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
if (ret_code)
return ret_code;
+ if (qp->push_db)
+ info->push_wqe = irdma_enable_push_wqe(qp, total_size);
+
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
if (!wqe)
return -ENOSPC;
- qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
set_64bit_val(wqe, IRDMA_BYTE_16,
FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
@@ -399,8 +447,8 @@ irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
irdma_wmb(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe)
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ if (qp->push_db)
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, info->push_wqe);
else if (post_sq)
irdma_uk_qp_post_wr(qp);
@@ -429,7 +477,7 @@ irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
u16 quanta;
u64 hdr;
- info->push_wqe = qp->push_db ? true : false;
+ info->push_wqe &= qp->push_db ? true : false;
op_info = &info->op.rdma_read;
if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
@@ -451,7 +499,6 @@ irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
qp->ord_cnt = 0;
}
- qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
addl_frag_cnt = op_info->num_lo_sges > 1 ?
(op_info->num_lo_sges - 1) : 0;
local_fence |= info->local_fence;
@@ -490,8 +537,8 @@ irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
irdma_wmb(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe)
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ if (qp->push_db)
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, info->push_wqe);
else if (post_sq)
irdma_uk_qp_post_wr(qp);
@@ -517,7 +564,7 @@ irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
bool read_fence = false;
u16 quanta;
- info->push_wqe = qp->push_db ? true : false;
+ info->push_wqe = false;
op_info = &info->op.send;
if (qp->max_sq_frag_cnt < op_info->num_sges)
@@ -534,6 +581,9 @@ irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
if (ret_code)
return ret_code;
+ if (qp->push_db)
+ info->push_wqe = irdma_enable_push_wqe(qp, total_size);
+
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
if (!wqe)
return -ENOSPC;
@@ -587,8 +637,8 @@ irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
irdma_wmb(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe)
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ if (qp->push_db)
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, info->push_wqe);
else if (post_sq)
irdma_uk_qp_post_wr(qp);
@@ -748,11 +798,11 @@ irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
return -EINVAL;
quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
+
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
if (!wqe)
return -ENOSPC;
- qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
read_fence |= info->read_fence;
set_64bit_val(wqe, IRDMA_BYTE_16,
FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
@@ -780,8 +830,8 @@ irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe)
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ if (qp->push_db)
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, info->push_wqe);
else if (post_sq)
irdma_uk_qp_post_wr(qp);
@@ -854,8 +904,8 @@ irdma_uk_inline_send(struct irdma_qp_uk *qp,
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe)
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ if (qp->push_db)
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, info->push_wqe);
else if (post_sq)
irdma_uk_qp_post_wr(qp);
@@ -905,8 +955,8 @@ irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe)
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ if (qp->push_db)
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, info->push_wqe);
else if (post_sq)
irdma_uk_qp_post_wr(qp);
@@ -1065,29 +1115,6 @@ irdma_check_rq_cqe(struct irdma_qp_uk *qp, u32 *array_idx)
}
/**
- * irdma_skip_duplicate_flush_cmpl - check last cmpl and update wqe if needed
- *
- * @ring: sq/rq ring
- * @flush_seen: information if flush for specific ring was already seen
- * @comp_status: completion status
- * @wqe_idx: new value of WQE index returned if there is more work on ring
- */
-static inline int
-irdma_skip_duplicate_flush_cmpl(struct irdma_ring ring, u8 flush_seen,
- enum irdma_cmpl_status comp_status,
- u32 *wqe_idx)
-{
- if (flush_seen) {
- if (IRDMA_RING_MORE_WORK(ring))
- *wqe_idx = ring.tail;
- else
- return -ENOENT;
- }
-
- return 0;
-}
-
-/**
* irdma_detect_unsignaled_cmpls - check if unsignaled cmpl is to be reported
* @cq: hw cq
* @qp: hw qp
@@ -1140,6 +1167,28 @@ irdma_detect_unsignaled_cmpls(struct irdma_cq_uk *cq,
}
/**
+ * irdma_uk_cq_empty - Check if CQ is empty
+ * @cq: hw cq
+ */
+bool
+irdma_uk_cq_empty(struct irdma_cq_uk *cq)
+{
+ __le64 *cqe;
+ u8 polarity;
+ u64 qword3;
+
+ if (cq->avoid_mem_cflct)
+ cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
+ else
+ cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);
+
+ get_64bit_val(cqe, 24, &qword3);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+
+ return polarity != cq->polarity;
+}
+
+/**
* irdma_uk_cq_poll_cmpl - get cq completion info
* @cq: hw cq
* @info: cq poll information returned
@@ -1158,6 +1207,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
u8 polarity;
bool ext_valid;
__le64 *ext_cqe;
+ unsigned long flags;
if (cq->avoid_mem_cflct)
cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
@@ -1229,6 +1279,10 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
qp = (struct irdma_qp_uk *)(irdma_uintptr) comp_ctx;
+ if (!qp || qp->destroy_pending) {
+ ret_code = -EFAULT;
+ goto exit;
+ }
if (info->error) {
info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
@@ -1253,15 +1307,10 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
get_64bit_val(cqe, IRDMA_BYTE_0, &qword0);
get_64bit_val(cqe, IRDMA_BYTE_16, &qword2);
- info->stat.raw = (u32)FIELD_GET(IRDMACQ_TCPSQN_ROCEPSN_RTT_TS, qword0);
info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);
info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
- if (!qp || qp->destroy_pending) {
- ret_code = -EFAULT;
- goto exit;
- }
wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
info->qp_handle = (irdma_qp_handle) (irdma_uintptr) qp;
info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
@@ -1269,51 +1318,42 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
u32 array_idx;
- ret_code = irdma_skip_duplicate_flush_cmpl(qp->rq_ring,
- qp->rq_flush_seen,
- info->comp_status,
- &wqe_idx);
- if (ret_code != 0)
- goto exit;
-
array_idx = wqe_idx / qp->rq_wqe_size_multiplier;
+ info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
+ info->signaled = 1;
+
+ if (qword3 & IRDMACQ_STAG) {
+ info->stag_invalid_set = true;
+ info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
+ } else {
+ info->stag_invalid_set = false;
+ }
if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
+ spin_lock_irqsave(qp->lock, flags);
if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
ret_code = -ENOENT;
+ spin_unlock_irqrestore(qp->lock, flags);
goto exit;
}
info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
- info->signaled = 1;
- array_idx = qp->rq_ring.tail;
+ IRDMA_RING_SET_TAIL(qp->rq_ring, qp->rq_ring.tail + 1);
+ if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
+ qp->rq_flush_complete = true;
+ else
+ move_cq_head = false;
+ spin_unlock_irqrestore(qp->lock, flags);
} else {
info->wr_id = qp->rq_wrid_array[array_idx];
- info->signaled = 1;
if (irdma_check_rq_cqe(qp, &array_idx)) {
info->wr_id = qp->rq_wrid_array[array_idx];
info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
return 0;
}
- }
-
- info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
-
- if (qword3 & IRDMACQ_STAG) {
- info->stag_invalid_set = true;
- info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
- } else {
- info->stag_invalid_set = false;
- }
- IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
- if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
- qp->rq_flush_seen = true;
- if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
- qp->rq_flush_complete = true;
- else
- move_cq_head = false;
+ IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
}
pring = &qp->rq_ring;
} else { /* q_type is IRDMA_CQE_QTYPE_SQ */
@@ -1335,12 +1375,6 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
qp->push_mode = false;
qp->push_dropped = true;
}
- ret_code = irdma_skip_duplicate_flush_cmpl(qp->sq_ring,
- qp->sq_flush_seen,
- info->comp_status,
- &wqe_idx);
- if (ret_code != 0)
- goto exit;
if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
info->signaled = qp->sq_wrtrk_array[wqe_idx].signaled;
@@ -1353,8 +1387,6 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
IRDMA_RING_SET_TAIL(qp->sq_ring,
wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
} else {
- unsigned long flags;
-
spin_lock_irqsave(qp->lock, flags);
if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
spin_unlock_irqrestore(qp->lock, flags);
@@ -1386,7 +1418,6 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
if (info->op_type == IRDMA_OP_TYPE_BIND_MW &&
info->minor_err == FLUSH_PROT_ERR)
info->minor_err = FLUSH_MW_BIND_ERR;
- qp->sq_flush_seen = true;
if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
qp->sq_flush_complete = true;
spin_unlock_irqrestore(qp->lock, flags);
@@ -1416,8 +1447,9 @@ exit:
IRDMA_RING_MOVE_TAIL(cq->cq_ring);
if (!cq->avoid_mem_cflct && ext_valid)
IRDMA_RING_MOVE_TAIL(cq->cq_ring);
- set_64bit_val(cq->shadow_area, IRDMA_BYTE_0,
- IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
+ if (IRDMA_RING_CURRENT_HEAD(cq->cq_ring) & 0x3F || irdma_uk_cq_empty(cq))
+ set_64bit_val(cq->shadow_area, IRDMA_BYTE_0,
+ IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
} else {
qword3 &= ~IRDMA_CQ_WQEIDX;
qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
@@ -1482,15 +1514,16 @@ irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
int
irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth)
{
- u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
-
- *sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);
+ u32 min_hw_quanta = (u32)uk_attrs->min_hw_wq_size << shift;
+ u64 hw_quanta =
+ irdma_round_up_wq(((u64)sq_size << shift) + IRDMA_SQ_RSVD);
- if (*sqdepth < min_size)
- *sqdepth = min_size;
- else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
+ if (hw_quanta < min_hw_quanta)
+ hw_quanta = min_hw_quanta;
+ else if (hw_quanta > uk_attrs->max_hw_wq_quanta)
return -EINVAL;
+ *sqdepth = hw_quanta;
return 0;
}
@@ -1501,15 +1534,16 @@ irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *s
int
irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth)
{
- u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
-
- *rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);
+ u32 min_hw_quanta = (u32)uk_attrs->min_hw_wq_size << shift;
+ u64 hw_quanta =
+ irdma_round_up_wq(((u64)rq_size << shift) + IRDMA_RQ_RSVD);
- if (*rqdepth < min_size)
- *rqdepth = min_size;
- else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
+ if (hw_quanta < min_hw_quanta)
+ hw_quanta = min_hw_quanta;
+ else if (hw_quanta > uk_attrs->max_hw_rq_quanta)
return -EINVAL;
+ *rqdepth = hw_quanta;
return 0;
}
diff --git a/sys/dev/irdma/irdma_user.h b/sys/dev/irdma/irdma_user.h
index 05828ebbd7d6..71b08d9d486d 100644
--- a/sys/dev/irdma/irdma_user.h
+++ b/sys/dev/irdma/irdma_user.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2023 Intel Corporation
+ * Copyright (c) 2015 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -184,12 +184,10 @@ enum irdma_device_caps_const {
IRDMA_MIN_IW_QP_ID = 0,
IRDMA_QUERY_FPM_BUF_SIZE = 176,
IRDMA_COMMIT_FPM_BUF_SIZE = 176,
- IRDMA_MAX_IW_QP_ID = 262143,
IRDMA_MIN_CEQID = 0,
IRDMA_MAX_CEQID = 1023,
IRDMA_CEQ_MAX_COUNT = IRDMA_MAX_CEQID + 1,
IRDMA_MIN_CQID = 0,
- IRDMA_MAX_CQID = 524287,
IRDMA_MIN_AEQ_ENTRIES = 1,
IRDMA_MAX_AEQ_ENTRIES = 524287,
IRDMA_MIN_CEQ_ENTRIES = 1,
@@ -284,7 +282,7 @@ struct irdma_cq_uk_init_info;
struct irdma_ring {
volatile u32 head;
- volatile u32 tail; /* effective tail */
+ volatile u32 tail;
u32 size;
};
@@ -385,12 +383,6 @@ struct irdma_cq_poll_info {
bool ud_smac_valid:1;
bool imm_valid:1;
bool signaled:1;
- union {
- u32 tcp_sqn;
- u32 roce_psn;
- u32 rtt;
- u32 raw;
- } stat;
};
struct qp_err_code {
@@ -426,6 +418,7 @@ struct irdma_wqe_uk_ops {
struct irdma_bind_window *op_info);
};
+bool irdma_uk_cq_empty(struct irdma_cq_uk *cq);
int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
struct irdma_cq_poll_info *info);
void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
@@ -465,6 +458,8 @@ struct irdma_qp_uk {
__le64 *shadow_area;
__le32 *push_db;
__le64 *push_wqe;
+ void *push_db_map;
+ void *push_wqe_map;
struct irdma_ring sq_ring;
struct irdma_ring sq_sig_ring;
struct irdma_ring rq_ring;
@@ -494,12 +489,11 @@ struct irdma_qp_uk {
bool sq_flush_complete:1; /* Indicates flush was seen and SQ was empty after the flush */
bool rq_flush_complete:1; /* Indicates flush was seen and RQ was empty after the flush */
bool destroy_pending:1; /* Indicates the QP is being destroyed */
+ bool last_push_db:1; /* Indicates last DB was push DB */
void *back_qp;
spinlock_t *lock;
u8 dbg_rq_flushed;
u16 ord_cnt;
- u8 sq_flush_seen;
- u8 rq_flush_seen;
u8 rd_fence_rate;
};
@@ -563,10 +557,12 @@ int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta);
int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
u32 inline_data, u8 *shift);
-int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth);
-int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth);
+int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size,
+ u8 shift, u32 *sqdepth);
+int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size,
+ u8 shift, u32 *rqdepth);
void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
- u32 wqe_idx, bool post_sq);
+ u32 wqe_idx, bool push_wqe);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
static inline struct qp_err_code irdma_ae_to_qp_err_code(u16 ae_id)
diff --git a/sys/dev/irdma/irdma_utils.c b/sys/dev/irdma/irdma_utils.c
index 038f1980082b..ef8cb38d8139 100644
--- a/sys/dev/irdma/irdma_utils.c
+++ b/sys/dev/irdma/irdma_utils.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2023 Intel Corporation
+ * Copyright (c) 2015 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -37,6 +37,7 @@
LIST_HEAD(irdma_handlers);
DEFINE_SPINLOCK(irdma_handler_lock);
+static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS];
static const struct ae_desc ae_desc_list[] = {
{IRDMA_AE_AMP_UNALLOCATED_STAG, "Unallocated memory key (L-Key/R-Key)"},
{IRDMA_AE_AMP_INVALID_STAG, "Invalid memory key (L-Key/R-Key)"},
@@ -206,7 +207,7 @@ irdma_get_ae_desc(u16 ae_id)
* @rf: RDMA PCI function
* @ip_addr: ip address for device
* @mac_addr: mac address ptr
- * @action: modify, delete or add
+ * @action: modify, delete or add/update
*/
int
irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, const u8 *mac_addr,
@@ -220,22 +221,22 @@ irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, const u8 *mac_addr,
spin_lock_irqsave(&rf->arp_lock, flags);
for (arp_index = 0; (u32)arp_index < rf->arp_table_size; arp_index++) {
- if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip)))
+ if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip)) &&
+ !rf->arp_table[arp_index].delete_pending)
break;
}
switch (action) {
- case IRDMA_ARP_ADD:
- if (arp_index != rf->arp_table_size) {
- arp_index = -1;
- break;
- }
-
- arp_index = 0;
- if (irdma_alloc_rsrc(rf, rf->allocated_arps, rf->arp_table_size,
- (u32 *)&arp_index, &rf->next_arp_index)) {
- arp_index = -1;
- break;
+ case IRDMA_ARP_ADD_UPDATE: /* ARP Add or Update */
+ if (arp_index == rf->arp_table_size) {
+ if (irdma_alloc_rsrc(rf, rf->allocated_arps,
+ rf->arp_table_size,
+ (u32 *)&arp_index,
+ &rf->next_arp_index)) {
+ arp_index = -1;
+ break;
+ }
+ atomic_set(&rf->arp_table[arp_index].refcnt, 0);
}
memcpy(rf->arp_table[arp_index].ip_addr, ip,
@@ -252,10 +253,16 @@ irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, const u8 *mac_addr,
break;
}
- memset(rf->arp_table[arp_index].ip_addr, 0,
- sizeof(rf->arp_table[arp_index].ip_addr));
- eth_zero_addr(rf->arp_table[arp_index].mac_addr);
- irdma_free_rsrc(rf, rf->allocated_arps, arp_index);
+ if (!atomic_read(&rf->arp_table[arp_index].refcnt)) {
+ memset(rf->arp_table[arp_index].ip_addr, 0,
+ sizeof(rf->arp_table[arp_index].ip_addr));
+ eth_zero_addr(rf->arp_table[arp_index].mac_addr);
+ irdma_free_rsrc(rf, rf->allocated_arps, arp_index);
+ rf->arp_table[arp_index].delete_pending = false;
+ } else {
+ rf->arp_table[arp_index].delete_pending = true;
+ arp_index = -1; /* prevent immediate CQP ARP index deletion */
+ }
break;
default:
arp_index = -1;
@@ -266,8 +273,61 @@ irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, const u8 *mac_addr,
return arp_index;
}
+static int
+irdma_get_arp(struct irdma_pci_f *rf, u16 arp_index)
+{
+ unsigned long flags;
+ u32 ip_zero[4] = {};
+
+ if (arp_index >= rf->arp_table_size)
+ return -EINVAL;
+
+ spin_lock_irqsave(&rf->arp_lock, flags);
+ if (!memcmp(rf->arp_table[arp_index].ip_addr, ip_zero, sizeof(ip_zero))) {
+ spin_unlock_irqrestore(&rf->arp_lock, flags);
+ return -EINVAL;
+ }
+ if (!atomic_read(&rf->arp_table[arp_index].refcnt))
+ atomic_set(&rf->arp_table[arp_index].refcnt, 1);
+ else
+ atomic_inc(&rf->arp_table[arp_index].refcnt);
+ spin_unlock_irqrestore(&rf->arp_lock, flags);
+
+ return 0;
+}
+
+static void
+irdma_put_arp(struct irdma_pci_f *rf, u16 arp_index)
+{
+ unsigned long flags;
+
+ if (arp_index >= rf->arp_table_size)
+ return;
+ spin_lock_irqsave(&rf->arp_lock, flags);
+ if (!atomic_dec_and_test(&rf->arp_table[arp_index].refcnt)) {
+ spin_unlock_irqrestore(&rf->arp_lock, flags);
+ return;
+ }
+
+ if (rf->arp_table[arp_index].delete_pending) {
+ u32 ip_addr[4];
+
+ memcpy(ip_addr, rf->arp_table[arp_index].ip_addr,
+ sizeof(ip_addr));
+ memset(rf->arp_table[arp_index].ip_addr, 0,
+ sizeof(rf->arp_table[arp_index].ip_addr));
+ eth_zero_addr(rf->arp_table[arp_index].mac_addr);
+ spin_unlock_irqrestore(&rf->arp_lock, flags);
+ irdma_arp_cqp_op(rf, arp_index, NULL, IRDMA_ARP_DELETE);
+ rf->arp_table[arp_index].delete_pending = false;
+ irdma_free_rsrc(rf, rf->allocated_arps, arp_index);
+ } else {
+ spin_unlock_irqrestore(&rf->arp_lock, flags);
+ }
+}
+
/**
- * irdma_add_arp - add a new arp entry if needed
+ * irdma_add_arp - add a new arp entry if needed and resolve it
* @rf: RDMA function
* @ip: IP address
* @mac: MAC address
@@ -275,18 +335,7 @@ irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, const u8 *mac_addr,
int
irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, const u8 *mac)
{
- int arpidx;
-
- arpidx = irdma_arp_table(rf, &ip[0], NULL, IRDMA_ARP_RESOLVE);
- if (arpidx >= 0) {
- if (ether_addr_equal(rf->arp_table[arpidx].mac_addr, mac))
- return arpidx;
-
- irdma_manage_arp_cache(rf, rf->arp_table[arpidx].mac_addr, ip,
- IRDMA_ARP_DELETE);
- }
-
- irdma_manage_arp_cache(rf, mac, ip, IRDMA_ARP_ADD);
+ irdma_manage_arp_cache(rf, mac, ip, IRDMA_ARP_ADD_UPDATE);
return irdma_arp_table(rf, ip, NULL, IRDMA_ARP_RESOLVE);
}
@@ -378,6 +427,8 @@ irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
atomic_set(&cqp_request->refcnt, 1);
memset(&cqp_request->compl_info, 0, sizeof(cqp_request->compl_info));
+ memset(&cqp_request->info, 0, sizeof(cqp_request->info));
+
return cqp_request;
}
@@ -503,6 +554,17 @@ irdma_wait_event(struct irdma_pci_f *rf,
READ_ONCE(cqp_request->request_done),
msecs_to_jiffies(wait_time_ms)))
break;
+ if (cqp_request->info.cqp_cmd_exec_status) {
+ irdma_debug(&rf->sc_dev, IRDMA_DEBUG_CQP,
+ "%s (%d) cqp op error status reported: %d, %d %x %x\n",
+ irdma_cqp_cmd_names[cqp_request->info.cqp_cmd],
+ cqp_request->info.cqp_cmd,
+ cqp_request->info.cqp_cmd_exec_status,
+ cqp_request->compl_info.error,
+ cqp_request->compl_info.maj_err_code,
+ cqp_request->compl_info.min_err_code);
+ break;
+ }
irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev);
@@ -540,7 +602,7 @@ static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
[IRDMA_OP_DELETE_ARP_CACHE_ENTRY] = "Delete ARP Cache Cmd",
[IRDMA_OP_MANAGE_APBVT_ENTRY] = "Manage APBV Table Entry Cmd",
[IRDMA_OP_CEQ_CREATE] = "CEQ Create Cmd",
- [IRDMA_OP_AEQ_CREATE] = "AEQ Destroy Cmd",
+ [IRDMA_OP_AEQ_CREATE] = "AEQ Create Cmd",
[IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY] = "Manage Quad Hash Table Entry Cmd",
[IRDMA_OP_QP_MODIFY] = "Modify QP Cmd",
[IRDMA_OP_QP_UPLOAD_CONTEXT] = "Upload Context Cmd",
@@ -635,7 +697,7 @@ irdma_handle_cqp_op(struct irdma_pci_f *rf,
bool put_cqp_request = true;
if (rf->reset)
- return 0;
+ return (info->create ? -EBUSY : 0);
irdma_get_cqp_request(cqp_request);
status = irdma_process_cqp_cmd(dev, info);
@@ -654,13 +716,23 @@ irdma_handle_cqp_op(struct irdma_pci_f *rf,
err:
if (irdma_cqp_crit_err(dev, info->cqp_cmd,
cqp_request->compl_info.maj_err_code,
- cqp_request->compl_info.min_err_code))
+ cqp_request->compl_info.min_err_code)) {
+ int qpn = -1;
+
+ if (info->cqp_cmd == IRDMA_OP_QP_CREATE)
+ qpn = cqp_request->info.in.u.qp_create.qp->qp_uk.qp_id;
+ else if (info->cqp_cmd == IRDMA_OP_QP_MODIFY)
+ qpn = cqp_request->info.in.u.qp_modify.qp->qp_uk.qp_id;
+ else if (info->cqp_cmd == IRDMA_OP_QP_DESTROY)
+ qpn = cqp_request->info.in.u.qp_destroy.qp->qp_uk.qp_id;
+
irdma_dev_err(&rf->iwdev->ibdev,
- "[%s Error][op_code=%d] status=%d waiting=%d completion_err=%d maj=0x%x min=0x%x\n",
- irdma_cqp_cmd_names[info->cqp_cmd], info->cqp_cmd, status,
+ "[%s Error] status=%d waiting=%d completion_err=%d maj=0x%x min=0x%x qpn=%d\n",
+ irdma_cqp_cmd_names[info->cqp_cmd], status,
cqp_request->waiting, cqp_request->compl_info.error,
cqp_request->compl_info.maj_err_code,
- cqp_request->compl_info.min_err_code);
+ cqp_request->compl_info.min_err_code, qpn);
+ }
if (put_cqp_request)
irdma_put_cqp_request(&rf->cqp, cqp_request);
@@ -715,7 +787,7 @@ irdma_cq_rem_ref(struct ib_cq *ibcq)
return;
}
- rf->cq_table[iwcq->cq_num] = NULL;
+ WRITE_ONCE(rf->cq_table[iwcq->cq_num], NULL);
spin_unlock_irqrestore(&rf->cqtable_lock, flags);
complete(&iwcq->free_cq);
}
@@ -1009,6 +1081,7 @@ irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
cqp_info->post_sq = 1;
cqp_info->in.u.cq_create.cq = cq;
cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
+ cqp_info->create = true;
status = irdma_handle_cqp_op(rf, cqp_request);
irdma_put_cqp_request(iwcqp, cqp_request);
@@ -1037,13 +1110,13 @@ irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
cqp_info = &cqp_request->info;
qp_info = &cqp_request->info.in.u.qp_create.info;
- memset(qp_info, 0, sizeof(*qp_info));
qp_info->cq_num_valid = true;
qp_info->next_iwarp_state = IRDMA_QP_STATE_RTS;
cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
cqp_info->post_sq = 1;
cqp_info->in.u.qp_create.qp = qp;
cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
+ cqp_info->create = true;
status = irdma_handle_cqp_op(rf, cqp_request);
irdma_put_cqp_request(iwcqp, cqp_request);
@@ -1060,10 +1133,10 @@ void
irdma_dealloc_push_page(struct irdma_pci_f *rf,
struct irdma_qp *iwqp)
{
+ struct irdma_sc_qp *qp = &iwqp->sc_qp;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
int status;
- struct irdma_sc_qp *qp = &iwqp->sc_qp;
if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX)
return;
@@ -1081,6 +1154,7 @@ irdma_dealloc_push_page(struct irdma_pci_f *rf,
cqp_info->in.u.manage_push_page.info.push_page_type = 0;
cqp_info->in.u.manage_push_page.cqp = &rf->cqp.sc_cqp;
cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
+
status = irdma_handle_cqp_op(rf, cqp_request);
if (!status)
qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
@@ -1160,6 +1234,7 @@ irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp,
cqp_info->post_sq = 1;
cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
+ cqp_info->create = false;
status = irdma_handle_cqp_op(rf, cqp_request);
irdma_put_cqp_request(&rf->cqp, cqp_request);
if (status) {
@@ -1241,7 +1316,6 @@ irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
return -ENOMEM;
cqp_info = &cqp_request->info;
- memset(cqp_info, 0, sizeof(*cqp_info));
cqp_info->cqp_cmd = IRDMA_OP_QP_DESTROY;
cqp_info->post_sq = 1;
cqp_info->in.u.qp_destroy.qp = qp;
@@ -1306,7 +1380,7 @@ irdma_ieq_get_qp(struct irdma_sc_dev *dev,
return NULL;
iwqp = cm_node->iwqp;
- irdma_rem_ref_cm_node(cm_node);
+ irdma_rem_ref_cmnode(cm_node);
return &iwqp->sc_qp;
}
@@ -1598,7 +1672,6 @@ irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
return -ENOMEM;
cqp_info = &cqp_request->info;
- memset(cqp_info, 0, sizeof(*cqp_info));
cqp_info->cqp_cmd = IRDMA_OP_STATS_GATHER;
cqp_info->post_sq = 1;
cqp_info->in.u.stats_gather.info = pestat->gather_info;
@@ -1616,44 +1689,6 @@ irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
}
/**
- * irdma_cqp_stats_inst_cmd - Allocate/free stats instance
- * @vsi: pointer to vsi structure
- * @cmd: command to allocate or free
- * @stats_info: pointer to allocate stats info
- */
-int
-irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
- struct irdma_stats_inst_info *stats_info)
-{
- struct irdma_pci_f *rf = dev_to_rf(vsi->dev);
- struct irdma_cqp *iwcqp = &rf->cqp;
- struct irdma_cqp_request *cqp_request;
- struct cqp_cmds_info *cqp_info;
- int status;
- bool wait = false;
-
- if (cmd == IRDMA_OP_STATS_ALLOCATE)
- wait = true;
- cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
- if (!cqp_request)
- return -ENOMEM;
-
- cqp_info = &cqp_request->info;
- memset(cqp_info, 0, sizeof(*cqp_info));
- cqp_info->cqp_cmd = cmd;
- cqp_info->post_sq = 1;
- cqp_info->in.u.stats_manage.info = *stats_info;
- cqp_info->in.u.stats_manage.scratch = (uintptr_t)cqp_request;
- cqp_info->in.u.stats_manage.cqp = &rf->cqp.sc_cqp;
- status = irdma_handle_cqp_op(rf, cqp_request);
- if (wait)
- stats_info->stats_idx = cqp_request->compl_info.op_ret_val;
- irdma_put_cqp_request(iwcqp, cqp_request);
-
- return status;
-}
-
-/**
* irdma_cqp_ceq_cmd - Create/Destroy CEQ's after CEQ 0
* @dev: pointer to device info
* @sc_ceq: pointer to ceq structure
@@ -1743,12 +1778,12 @@ irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
return -ENOMEM;
cqp_info = &cqp_request->info;
- memset(cqp_info, 0, sizeof(*cqp_info));
cqp_info->cqp_cmd = cmd;
cqp_info->post_sq = 1;
cqp_info->in.u.ws_node.info = *node_info;
cqp_info->in.u.ws_node.cqp = cqp;
cqp_info->in.u.ws_node.scratch = (uintptr_t)cqp_request;
+ cqp_info->create = true;
status = irdma_handle_cqp_op(rf, cqp_request);
if (status)
goto exit;
@@ -1773,7 +1808,7 @@ exit:
}
/**
- * irdma_ah_cqp_op - perform an AH cqp operation
+ * irdma_ah_do_cqp - perform an AH cqp operation
* @rf: RDMA PCI function
* @sc_ah: address handle
* @cmd: AH operation
@@ -1783,8 +1818,8 @@ exit:
*
* returns errno
*/
-int
-irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
+static int
+irdma_ah_do_cqp(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
bool wait,
void (*callback_fcn) (struct irdma_cqp_request *),
void *cb_param)
@@ -1811,6 +1846,7 @@ irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
cqp_info->in.u.ah_create.info = sc_ah->ah_info;
cqp_info->in.u.ah_create.scratch = (uintptr_t)cqp_request;
cqp_info->in.u.ah_create.cqp = &rf->cqp.sc_cqp;
+ cqp_info->create = true;
} else if (cmd == IRDMA_OP_AH_DESTROY) {
cqp_info->in.u.ah_destroy.info = sc_ah->ah_info;
cqp_info->in.u.ah_destroy.scratch = (uintptr_t)cqp_request;
@@ -1833,6 +1869,36 @@ irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
return 0;
}
+int
+irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
+ bool wait,
+ void (*callback_fcn) (struct irdma_cqp_request *),
+ void *cb_param)
+{
+ int status;
+
+ if (cmd == IRDMA_OP_AH_CREATE) {
+ status = irdma_get_arp(rf, sc_ah->ah_info.dst_arpindex);
+ if (status) {
+ irdma_dev_err(&rf->iwdev->ibdev, "%s get_arp failed for index = %d\n",
+ __func__, sc_ah->ah_info.dst_arpindex);
+
+ return -EINVAL;
+ }
+ status = irdma_ah_do_cqp(rf, sc_ah, cmd, wait, callback_fcn,
+ cb_param);
+ if (status)
+ irdma_put_arp(rf, sc_ah->ah_info.dst_arpindex);
+ } else {
+ status = irdma_ah_do_cqp(rf, sc_ah, cmd, wait, callback_fcn,
+ cb_param);
+ if (cmd == IRDMA_OP_AH_DESTROY)
+ irdma_put_arp(rf, sc_ah->ah_info.dst_arpindex);
+ }
+
+ return status;
+}
+
/**
* irdma_ieq_ah_cb - callback after creation of AH for IEQ
* @cqp_request: pointer to cqp_request of create AH
@@ -1853,6 +1919,9 @@ irdma_ieq_ah_cb(struct irdma_cqp_request *cqp_request)
irdma_ieq_cleanup_qp(qp->vsi->ieq, qp);
}
spin_unlock_irqrestore(&qp->pfpdu.lock, flags);
+ if (!cqp_request->waiting)
+ irdma_put_cqp_request(sc_ah->dev->cqp->back_cqp,
+ cqp_request);
}
/**
@@ -1867,6 +1936,9 @@ irdma_ilq_ah_cb(struct irdma_cqp_request *cqp_request)
sc_ah->ah_info.ah_valid = !cqp_request->compl_info.op_ret_val;
irdma_add_conn_est_qh(cm_node);
+ if (!cqp_request->waiting)
+ irdma_put_cqp_request(sc_ah->dev->cqp->back_cqp,
+ cqp_request);
}
/**
@@ -2239,39 +2311,35 @@ clear_qp_ctx_addr(__le64 * ctx)
/**
* irdma_upload_qp_context - upload raw QP context
- * @iwqp: QP pointer
+ * @rf: RDMA PCI function
+ * @qpn: QP ID
+ * @qp_type: QP Type
* @freeze: freeze QP
* @raw: raw context flag
*/
int
-irdma_upload_qp_context(struct irdma_qp *iwqp, bool freeze, bool raw)
+irdma_upload_qp_context(struct irdma_pci_f *rf, u32 qpn,
+ u8 qp_type, bool freeze, bool raw)
{
struct irdma_dma_mem dma_mem;
struct irdma_sc_dev *dev;
- struct irdma_sc_qp *qp;
struct irdma_cqp *iwcqp;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
struct irdma_upload_context_info *info;
- struct irdma_pci_f *rf;
int ret;
u32 *ctx;
- rf = iwqp->iwdev->rf;
- if (!rf)
- return -EINVAL;
-
- qp = &iwqp->sc_qp;
dev = &rf->sc_dev;
iwcqp = &rf->cqp;
cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
- if (!cqp_request)
+ if (!cqp_request) {
+ irdma_debug((dev), IRDMA_DEBUG_QP, "Could not get CQP req for QP [%u]\n", qpn);
return -EINVAL;
-
+ }
cqp_info = &cqp_request->info;
info = &cqp_info->in.u.qp_upload_context.info;
- memset(info, 0, sizeof(struct irdma_upload_context_info));
cqp_info->cqp_cmd = IRDMA_OP_QP_UPLOAD_CONTEXT;
cqp_info->post_sq = 1;
cqp_info->in.u.qp_upload_context.dev = dev;
@@ -2281,6 +2349,7 @@ irdma_upload_qp_context(struct irdma_qp *iwqp, bool freeze, bool raw)
dma_mem.va = irdma_allocate_dma_mem(dev->hw, &dma_mem, dma_mem.size, PAGE_SIZE);
if (!dma_mem.va) {
irdma_put_cqp_request(&rf->cqp, cqp_request);
+ irdma_debug((dev), IRDMA_DEBUG_QP, "Could not allocate buffer for QP [%u]\n", qpn);
return -ENOMEM;
}
@@ -2288,20 +2357,21 @@ irdma_upload_qp_context(struct irdma_qp *iwqp, bool freeze, bool raw)
info->buf_pa = dma_mem.pa;
info->raw_format = raw;
info->freeze_qp = freeze;
- info->qp_type = qp->qp_uk.qp_type; /* 1 is iWARP and 2 UDA */
- info->qp_id = qp->qp_uk.qp_id;
+ info->qp_type = qp_type; /* 1 is iWARP and 2 UDA */
+ info->qp_id = qpn;
ret = irdma_handle_cqp_op(rf, cqp_request);
if (ret)
goto error;
- irdma_debug(dev, IRDMA_DEBUG_QP, "PRINT CONTXT QP [%d]\n", info->qp_id);
+ irdma_debug((dev), IRDMA_DEBUG_QP, "PRINT CONTXT QP [%u]\n", info->qp_id);
{
u32 i, j;
clear_qp_ctx_addr(dma_mem.va);
for (i = 0, j = 0; i < 32; i++, j += 4)
- irdma_debug(dev, IRDMA_DEBUG_QP,
- "%d:\t [%08X %08x %08X %08X]\n", (j * 4),
- ctx[j], ctx[j + 1], ctx[j + 2], ctx[j + 3]);
+ irdma_debug((dev), IRDMA_DEBUG_QP,
+ "[%u] %u:\t [%08X %08x %08X %08X]\n",
+ info->qp_id, (j * 4), ctx[j], ctx[j + 1],
+ ctx[j + 2], ctx[j + 3]);
}
error:
irdma_put_cqp_request(iwcqp, cqp_request);
@@ -2310,20 +2380,41 @@ error:
return ret;
}
-bool
-irdma_cq_empty(struct irdma_cq *iwcq)
+static bool
+qp_has_unpolled_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
{
- struct irdma_cq_uk *ukcq;
- u64 qword3;
+ struct irdma_cq_uk *cq = &iwcq->sc_cq.cq_uk;
+ struct irdma_qp_uk *qp = &iwqp->sc_qp.qp_uk;
+ u32 cq_head = IRDMA_RING_CURRENT_HEAD(cq->cq_ring);
+ u64 qword3, comp_ctx;
__le64 *cqe;
- u8 polarity;
+ u8 polarity, cq_polarity;
+
+ cq_polarity = cq->polarity;
+ do {
+ if (cq->avoid_mem_cflct)
+ cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
+ else
+ cqe = cq->cq_base[cq_head].buf;
+ get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+
+ if (polarity != cq_polarity)
+ break;
+
+ /* Ensure CQE contents are read after valid bit is checked */
+ rmb();
+
+ get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
+ if ((struct irdma_qp_uk *)(irdma_uintptr) comp_ctx == qp)
+ return true;
- ukcq = &iwcq->sc_cq.cq_uk;
- cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
- get_64bit_val(cqe, 24, &qword3);
- polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+ cq_head = (cq_head + 1) % cq->cq_ring.size;
+ if (!cq_head)
+ cq_polarity ^= 1;
+ } while (true);
- return polarity != ukcq->polarity;
+ return false;
}
void
@@ -2380,11 +2471,18 @@ irdma_set_cpi_common_values(struct irdma_cq_poll_info *cpi,
static inline void
irdma_comp_handler(struct irdma_cq *cq)
{
+ struct irdma_device *iwdev = to_iwdev(cq->ibcq.device);
+ struct irdma_ceq *ceq = &iwdev->rf->ceqlist[cq->sc_cq.ceq_id];
+ unsigned long flags;
+
if (!cq->ibcq.comp_handler)
return;
- if (atomic_cmpxchg(&cq->armed, 1, 0))
+ if (atomic_read(&cq->armed)) {
+ spin_lock_irqsave(&ceq->ce_lock, flags);
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+ spin_unlock_irqrestore(&ceq->ce_lock, flags);
+ }
}
/**
@@ -2405,7 +2503,7 @@ irdma_generate_flush_completions(struct irdma_qp *iwqp)
unsigned long flags1;
spin_lock_irqsave(&iwqp->iwscq->lock, flags1);
- if (irdma_cq_empty(iwqp->iwscq)) {
+ if (!qp_has_unpolled_cqes(iwqp, iwqp->iwscq)) {
unsigned long flags2;
spin_lock_irqsave(&iwqp->lock, flags2);
@@ -2452,7 +2550,7 @@ irdma_generate_flush_completions(struct irdma_qp *iwqp)
}
spin_lock_irqsave(&iwqp->iwrcq->lock, flags1);
- if (irdma_cq_empty(iwqp->iwrcq)) {
+ if (!qp_has_unpolled_cqes(iwqp, iwqp->iwrcq)) {
unsigned long flags2;
spin_lock_irqsave(&iwqp->lock, flags2);
@@ -2527,3 +2625,49 @@ irdma_udqp_qs_worker(struct work_struct *work)
irdma_qp_rem_ref(&udqs_work->iwqp->ibqp);
kfree(udqs_work);
}
+
+void
+irdma_chk_free_stag(struct irdma_pci_f *rf)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_dealloc_stag_info *info;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.dealloc_stag.info;
+ info->stag_idx = RS_64_1(rf->chk_stag, IRDMA_CQPSQ_STAG_IDX_S);
+ cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.dealloc_stag.dev = &rf->sc_dev;
+ cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
+ irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+}
+
+void
+cqp_poll_worker(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct irdma_pci_f *rf = container_of(dwork, struct irdma_pci_f, dwork_cqp_poll);
+ struct irdma_mr iwmr = {};
+ struct irdma_pd *iwpd;
+
+ iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL);
+ if (!iwpd)
+ return;
+ iwmr.stag = rf->chk_stag;
+ iwmr.ibmw.type = IB_MW_TYPE_1;
+ iwmr.ibmr.pd = &iwpd->ibpd;
+ if (irdma_hw_alloc_mw(rf->iwdev, &iwmr))
+ goto exit;
+ irdma_chk_free_stag(rf);
+
+ mod_delayed_work(rf->iwdev->cleanup_wq, &rf->dwork_cqp_poll,
+ msecs_to_jiffies(3000));
+exit:
+ kfree(iwpd);
+}
diff --git a/sys/dev/irdma/irdma_verbs.c b/sys/dev/irdma/irdma_verbs.c
index 59d4bf392562..a131286d1d37 100644
--- a/sys/dev/irdma/irdma_verbs.c
+++ b/sys/dev/irdma/irdma_verbs.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2023 Intel Corporation
+ * Copyright (c) 2015 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -60,7 +60,6 @@ irdma_query_device(struct ib_device *ibdev,
irdma_fw_minor_ver(&rf->sc_dev);
props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
IB_DEVICE_MEM_MGT_EXTENSIONS;
- props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
props->vendor_id = pcidev->vendor;
props->vendor_part_id = pcidev->device;
props->hw_ver = pcidev->revision;
@@ -229,6 +228,7 @@ irdma_alloc_push_page(struct irdma_qp *iwqp)
cqp_info->in.u.manage_push_page.info.push_page_type = 0;
cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;
cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
+ cqp_info->create = true;
status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
if (!status && cqp_request->compl_info.op_ret_val <
@@ -273,9 +273,20 @@ irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
{
struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
unsigned long flags;
+ struct irdma_cmpl_gen *cmpl_node;
+ struct list_head *tmp_node, *list_node;
spin_lock_irqsave(&iwcq->lock, flags);
irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq);
+
+ list_for_each_safe(list_node, tmp_node, &iwcq->cmpl_generated) {
+ cmpl_node = list_entry(list_node, struct irdma_cmpl_gen, list);
+ if (cmpl_node->cpi.qp_id == iwqp->ibqp.qp_num) {
+ list_del(&cmpl_node->list);
+ kfree(cmpl_node);
+ }
+ }
+
spin_unlock_irqrestore(&iwcq->lock, flags);
}
@@ -390,8 +401,7 @@ irdma_setup_umode_qp(struct ib_udata *udata,
ret = ib_copy_from_udata(&req, udata,
min(sizeof(req), udata->inlen));
if (ret) {
- irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
- "ib_copy_from_data fail\n");
+ irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "ib_copy_from_data fail\n");
return ret;
}
@@ -406,8 +416,7 @@ irdma_setup_umode_qp(struct ib_udata *udata,
if (!iwqp->iwpbl) {
ret = -ENODATA;
- irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
- "no pbl info\n");
+ irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "no pbl info\n");
return ret;
}
}
@@ -524,7 +533,6 @@ irdma_setup_kmode_qp(struct irdma_device *iwdev,
info->shadow_area_pa = info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
- ukinfo->qp_id = iwqp->ibqp.qp_num;
iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
@@ -549,7 +557,6 @@ irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
cqp_info = &cqp_request->info;
qp_info = &cqp_request->info.in.u.qp_create.info;
- memset(qp_info, 0, sizeof(*qp_info));
qp_info->mac_valid = true;
qp_info->cq_num_valid = true;
qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;
@@ -558,6 +565,7 @@ irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
cqp_info->post_sq = 1;
cqp_info->in.u.qp_create.qp = &iwqp->sc_qp;
cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
+ cqp_info->create = true;
status = irdma_handle_cqp_op(rf, cqp_request);
irdma_put_cqp_request(&rf->cqp, cqp_request);
@@ -574,7 +582,7 @@ irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
struct irdma_udp_offload_info *udp_info;
udp_info = &iwqp->udp_info;
- udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu));
+ udp_info->snd_mss = ib_mtu_enum_to_int(iboe_get_mtu(iwdev->vsi.mtu));
udp_info->cwnd = iwdev->roce_cwnd;
udp_info->rexmit_thresh = 2;
udp_info->rnr_nak_thresh = 2;
@@ -680,7 +688,26 @@ irdma_sched_qp_flush_work(struct irdma_qp *iwqp)
}
void
-irdma_flush_worker(struct work_struct *work)
+irdma_user_flush_worker(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp,
+ dwork_flush);
+
+ /*
+ * Set the WAIT flag to prevent a massive buildup of flush commands in the extreme case of many QPs lingering
+ * in the ERROR state.
+ */
+ irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_FLUSH_RQ | IRDMA_REFLUSH |
+ IRDMA_FLUSH_WAIT);
+
+ /* Re-arm continuously. Work is canceled when QP is deleted. */
+ mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
+ msecs_to_jiffies(IRDMA_PERIODIC_FLUSH_MS));
+}
+
+void
+irdma_kern_flush_worker(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush);
@@ -974,8 +1001,7 @@ irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_STATE) {
if (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
iwqp->ibqp.qp_type, attr_mask)) {
- irdma_dev_warn(&iwdev->ibdev,
- "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
+ irdma_dev_warn(&iwdev->ibdev, "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
iwqp->ibqp.qp_num, iwqp->ibqp_state,
attr->qp_state);
ret = -EINVAL;
@@ -1022,8 +1048,7 @@ irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
udp_info->cwnd = iwdev->roce_cwnd;
roce_info->ack_credits = iwdev->roce_ackcreds;
if (iwdev->push_mode && udata &&
- iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
- dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
spin_unlock_irqrestore(&iwqp->lock, flags);
irdma_alloc_push_page(iwqp);
spin_lock_irqsave(&iwqp->lock, flags);
@@ -1090,14 +1115,14 @@ irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (iwqp->iwarp_state == info.curr_iwarp_state) {
iwqp->iwarp_state = info.next_iwarp_state;
iwqp->ibqp_state = attr->qp_state;
+ iwqp->sc_qp.qp_state = iwqp->iwarp_state;
}
if (iwqp->ibqp_state > IB_QPS_RTS &&
- !iwqp->flush_issued) {
+ !atomic_read(&iwqp->flush_issued)) {
spin_unlock_irqrestore(&iwqp->lock, flags);
irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
IRDMA_FLUSH_RQ |
IRDMA_FLUSH_WAIT);
- iwqp->flush_issued = 1;
} else {
spin_unlock_irqrestore(&iwqp->lock, flags);
@@ -1198,8 +1223,7 @@ irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
issue_modify_qp = 1;
}
if (iwdev->push_mode && udata &&
- iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
- dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
spin_unlock_irqrestore(&iwqp->lock, flags);
irdma_alloc_push_page(iwqp);
spin_lock_irqsave(&iwqp->lock, flags);
@@ -1318,6 +1342,7 @@ irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
if (iwqp->iwarp_state == info.curr_iwarp_state) {
iwqp->iwarp_state = info.next_iwarp_state;
iwqp->ibqp_state = attr->qp_state;
+ iwqp->sc_qp.qp_state = iwqp->iwarp_state;
}
spin_unlock_irqrestore(&iwqp->lock, flags);
}
@@ -1337,7 +1362,7 @@ irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
if (iwqp->cm_node) {
- atomic_inc(&iwqp->cm_node->refcnt);
+ irdma_add_ref_cmnode(iwqp->cm_node);
spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
close_timer_started = atomic_inc_return(&iwqp->close_timer_started);
if (iwqp->cm_id && close_timer_started == 1)
@@ -1345,7 +1370,7 @@ irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
(struct irdma_puda_buf *)iwqp,
IRDMA_TIMER_TYPE_CLOSE, 1, 0);
- irdma_rem_ref_cm_node(iwqp->cm_node);
+ irdma_rem_ref_cmnode(iwqp->cm_node);
} else {
spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
}
@@ -1482,11 +1507,11 @@ irdma_resize_cq(struct ib_cq *ibcq, int entries,
if (!iwcq->user_mode) {
entries++;
- if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2)
entries *= 2;
}
- info.cq_size = max(entries, 4);
+ info.cq_size = max_t(int, entries, 4);
if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1)
return 0;
@@ -1557,6 +1582,7 @@ irdma_resize_cq(struct ib_cq *ibcq, int entries,
cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;
cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;
cqp_info->post_sq = 1;
+ cqp_info->create = true;
ret = irdma_handle_cqp_op(rf, cqp_request);
irdma_put_cqp_request(&rf->cqp, cqp_request);
if (ret)
@@ -1668,7 +1694,7 @@ irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
u32 pg_idx;
for (pg_idx = 0; pg_idx < npages; pg_idx++) {
- if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
+ if ((*arr + ((u64)pg_size * pg_idx)) != arr[pg_idx])
return false;
}
@@ -1835,6 +1861,44 @@ irdma_handle_q_mem(struct irdma_device *iwdev,
}
/**
+ * irdma_hw_alloc_mw - create the hw memory window
+ * @iwdev: irdma device
+ * @iwmr: pointer to memory window info
+ */
+int
+irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
+{
+ struct irdma_mw_alloc_info *info;
+ struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.mw_alloc.info;
+ if (iwmr->ibmw.type == IB_MW_TYPE_1)
+ info->mw_wide = true;
+
+ info->page_size = PAGE_SIZE;
+ info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
+ info->pd_id = iwpd->sc_pd.pd_id;
+ info->remote_access = true;
+ cqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev;
+ cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request;
+ cqp_info->create = true;
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
* irdma_hw_alloc_stag - cqp command to allocate stag
* @iwdev: irdma device
* @iwmr: irdma mr pointer
@@ -1856,7 +1920,6 @@ irdma_hw_alloc_stag(struct irdma_device *iwdev,
cqp_info = &cqp_request->info;
info = &cqp_info->in.u.alloc_stag.info;
- memset(info, 0, sizeof(*info));
info->page_size = PAGE_SIZE;
info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
info->pd_id = iwpd->sc_pd.pd_id;
@@ -1867,6 +1930,7 @@ irdma_hw_alloc_stag(struct irdma_device *iwdev,
cqp_info->post_sq = 1;
cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev;
cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
+ cqp_info->create = true;
status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
if (!status)
@@ -1948,12 +2012,14 @@ irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
cqp_info = &cqp_request->info;
stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
- memset(stag_info, 0, sizeof(*stag_info));
stag_info->va = iwpbl->user_base;
stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
stag_info->stag_key = (u8)iwmr->stag;
stag_info->total_len = iwmr->len;
- stag_info->all_memory = (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) ? true : false;
+ if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) || iwmr->dma_mr)
+ stag_info->all_memory = true;
+ else
+ stag_info->all_memory = false;
stag_info->access_rights = irdma_get_mr_access(access,
iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev);
stag_info->pd_id = iwpd->sc_pd.pd_id;
@@ -1979,6 +2045,7 @@ irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
cqp_info->post_sq = 1;
cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev;
cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
+ cqp_info->create = true;
ret = irdma_handle_cqp_op(iwdev->rf, cqp_request);
irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
@@ -1992,7 +2059,7 @@ irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
* irdma_alloc_iwmr - Allocate iwmr @region - memory region @pd - protection domain @virt - virtual address @reg_type -
* registration type
*/
-static struct irdma_mr *
+struct irdma_mr *
irdma_alloc_iwmr(struct ib_umem *region,
struct ib_pd *pd, u64 virt,
enum irdma_memreg_type reg_type)
@@ -2022,19 +2089,17 @@ irdma_alloc_iwmr(struct ib_umem *region,
return iwmr;
}
-static void
+void
irdma_free_iwmr(struct irdma_mr *iwmr)
{
kfree(iwmr);
}
/*
- * irdma_reg_user_mr_type_mem - Handle memory registration
- * @iwmr - irdma mr
- * @access - access rights
- * @create_stag - flag to create stag or not
+ * irdma_reg_user_mr_type_mem - Handle memory registration @iwmr - irdma mr @access - access rights @create_stag - flag
+ * to create stag or not
*/
-static int
+int
irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access,
bool create_stag)
{
@@ -2091,7 +2156,7 @@ free_pble:
/*
* irdma_reg_user_mr_type_qp - Handle QP memory registration @req - memory reg req @udata - user info @iwmr - irdma mr
*/
-static int
+int
irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
struct ib_udata *udata,
struct irdma_mr *iwmr)
@@ -2104,6 +2169,11 @@ irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
int err;
u8 lvl;
+ /* iWarp: Catch page not starting on OS page boundary */
+ if (!rdma_protocol_roce(&iwdev->ibdev, 1) &&
+ ib_umem_offset(iwmr->region))
+ return -EINVAL;
+
total = req.sq_pages + req.rq_pages + IRDMA_SHADOW_PGCNT;
if (total > iwmr->page_cnt)
return -EINVAL;
@@ -2126,7 +2196,7 @@ irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
/*
* irdma_reg_user_mr_type_cq - Handle CQ memory registration @req - memory reg req @udata - user info @iwmr - irdma mr
*/
-static int
+int
irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
struct ib_udata *udata,
struct irdma_mr *iwmr)
@@ -2158,85 +2228,6 @@ irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
return 0;
}
-/**
- * irdma_reg_user_mr - Register a user memory region
- * @pd: ptr of pd
- * @start: virtual start address
- * @len: length of mr
- * @virt: virtual address
- * @access: access of mr
- * @udata: user data
- */
-static struct ib_mr *
-irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
- u64 virt, int access,
- struct ib_udata *udata)
-{
-#define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
- struct irdma_device *iwdev = to_iwdev(pd->device);
- struct irdma_mem_reg_req req = {};
- struct ib_umem *region;
- struct irdma_mr *iwmr;
- int err;
-
- if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
- return ERR_PTR(-EINVAL);
-
- if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN)
- return ERR_PTR(-EINVAL);
-
- region = ib_umem_get(pd->uobject->context, start, len, access, 0);
-
- if (IS_ERR(region)) {
- irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
- "Failed to create ib_umem region\n");
- return (struct ib_mr *)region;
- }
-
- if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) {
- ib_umem_release(region);
- return ERR_PTR(-EFAULT);
- }
-
- iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type);
- if (IS_ERR(iwmr)) {
- ib_umem_release(region);
- return (struct ib_mr *)iwmr;
- }
-
- switch (req.reg_type) {
- case IRDMA_MEMREG_TYPE_QP:
- err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
- if (err)
- goto error;
-
- break;
- case IRDMA_MEMREG_TYPE_CQ:
- err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
- if (err)
- goto error;
-
- break;
- case IRDMA_MEMREG_TYPE_MEM:
- err = irdma_reg_user_mr_type_mem(iwmr, access, true);
- if (err)
- goto error;
-
- break;
- default:
- err = -EINVAL;
- goto error;
- }
-
- return &iwmr->ibmr;
-
-error:
- ib_umem_release(region);
- irdma_free_iwmr(iwmr);
-
- return ERR_PTR(err);
-}
-
int
irdma_hwdereg_mr(struct ib_mr *ib_mr)
{
@@ -2262,10 +2253,11 @@ irdma_hwdereg_mr(struct ib_mr *ib_mr)
cqp_info = &cqp_request->info;
info = &cqp_info->in.u.dealloc_stag.info;
- memset(info, 0, sizeof(*info));
info->pd_id = iwpd->sc_pd.pd_id;
info->stag_idx = RS_64_1(ib_mr->rkey, IRDMA_CQPSQ_STAG_IDX_S);
info->mr = true;
+ if (iwmr->type != IRDMA_MEMREG_TYPE_MEM)
+ info->skip_flush_markers = true;
if (iwpbl->pbl_allocated)
info->dealloc_pbl = true;
@@ -2303,7 +2295,8 @@ irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len,
if (IS_ERR(region)) {
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
- "Failed to create ib_umem region\n");
+ "Failed to create ib_umem region err=%ld\n",
+ PTR_ERR(region));
return (struct ib_mr *)region;
}
@@ -2335,10 +2328,11 @@ err:
* @size: size of memory to register
* @access: Access rights
* @iova_start: start of virtual address for physical buffers
+ * @dma_mr: Flag indicating DMA Mem region
*/
struct ib_mr *
irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access,
- u64 *iova_start)
+ u64 *iova_start, bool dma_mr)
{
struct irdma_device *iwdev = to_iwdev(pd->device);
struct irdma_pbl *iwpbl;
@@ -2355,6 +2349,7 @@ irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access,
iwpbl = &iwmr->iwpbl;
iwpbl->iwmr = iwmr;
iwmr->type = IRDMA_MEMREG_TYPE_MEM;
+ iwmr->dma_mr = dma_mr;
iwpbl->user_base = *iova_start;
stag = irdma_create_stag(iwdev);
if (!stag) {
@@ -2394,7 +2389,7 @@ irdma_get_dma_mr(struct ib_pd *pd, int acc)
{
u64 kva = 0;
- return irdma_reg_phys_mr(pd, 0, 0, acc, &kva);
+ return irdma_reg_phys_mr(pd, 0, 0, acc, &kva, true);
}
/**
@@ -2547,7 +2542,7 @@ irdma_post_send(struct ib_qp *ibqp,
break;
case IB_WR_LOCAL_INV:
info.op_type = IRDMA_OP_TYPE_INV_STAG;
- info.local_fence = info.read_fence;
+ info.local_fence = true;
info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
break;
@@ -2593,7 +2588,7 @@ irdma_post_send(struct ib_qp *ibqp,
ib_wr = ib_wr->next;
}
- if (!iwqp->flush_issued) {
+ if (!atomic_read(&iwqp->flush_issued)) {
if (iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS)
irdma_uk_qp_post_wr(ukqp);
spin_unlock_irqrestore(&iwqp->lock, flags);
@@ -2641,13 +2636,13 @@ irdma_post_recv(struct ib_qp *ibqp,
"post_recv err %d\n", err);
goto out;
}
-
ib_wr = ib_wr->next;
}
out:
spin_unlock_irqrestore(&iwqp->lock, flags);
- if (iwqp->flush_issued)
+
+ if (atomic_read(&iwqp->flush_issued))
irdma_sched_qp_flush_work(iwqp);
if (err)
@@ -2934,7 +2929,7 @@ irdma_req_notify_cq(struct ib_cq *ibcq,
}
if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
- (!irdma_cq_empty(iwcq) || !list_empty(&iwcq->cmpl_generated)))
+ (!irdma_uk_cq_empty(&iwcq->sc_cq.cq_uk) || !list_empty(&iwcq->cmpl_generated)))
ret = 1;
spin_unlock_irqrestore(&iwcq->lock, flags);
@@ -3012,6 +3007,10 @@ irdma_mcast_cqp_op(struct irdma_device *iwdev,
cqp_info->post_sq = 1;
cqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request;
cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp;
+
+ if (op == IRDMA_OP_MC_CREATE)
+ cqp_info->create = true;
+
status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
diff --git a/sys/dev/irdma/irdma_verbs.h b/sys/dev/irdma/irdma_verbs.h
index 9a5b1cdb0381..d3f240783c3d 100644
--- a/sys/dev/irdma/irdma_verbs.h
+++ b/sys/dev/irdma/irdma_verbs.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2023 Intel Corporation
+ * Copyright (c) 2015 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -37,6 +37,7 @@
#define IRDMA_MAX_SAVED_PHY_PGADDR 4
#define IRDMA_FLUSH_DELAY_MS 20
+#define IRDMA_PERIODIC_FLUSH_MS 2000
#define IRDMA_PKEY_TBL_SZ 1
#define IRDMA_DEFAULT_PKEY 0xFFFF
@@ -141,6 +142,7 @@ struct irdma_mr {
int access;
u8 is_hwreg;
u16 type;
+ bool dma_mr:1;
u32 page_cnt;
u64 page_size;
u64 page_msk;
@@ -154,21 +156,15 @@ struct irdma_mr {
struct irdma_cq {
struct ib_cq ibcq;
struct irdma_sc_cq sc_cq;
- u16 cq_head;
- u16 cq_size;
- u16 cq_num;
+ u32 cq_num;
bool user_mode;
atomic_t armed;
enum irdma_cmpl_notify last_notify;
- u32 polled_cmpls;
- u32 cq_mem_size;
struct irdma_dma_mem kmem;
struct irdma_dma_mem kmem_shadow;
struct completion free_cq;
atomic_t refcnt;
spinlock_t lock; /* for poll cq */
- struct irdma_pbl *iwpbl;
- struct irdma_pbl *iwpbl_shadow;
struct list_head resize_list;
struct irdma_cq_poll_info cur_cqe;
struct list_head cmpl_generated;
@@ -244,10 +240,12 @@ struct irdma_qp {
dma_addr_t pbl_pbase;
struct page *page;
u8 iwarp_state;
+ atomic_t flush_issued;
u16 term_sq_flush_code;
u16 term_rq_flush_code;
u8 hw_iwarp_state;
u8 hw_tcp_state;
+ u8 ae_src;
struct irdma_qp_kmode kqp;
struct irdma_dma_mem host_ctx;
struct timer_list terminate_timer;
@@ -262,7 +260,6 @@ struct irdma_qp {
bool active_conn:1;
bool user_mode:1;
bool hte_added:1;
- bool flush_issued:1;
bool sig_all:1;
bool pau_mode:1;
bool suspend_pending:1;
@@ -385,6 +382,12 @@ static inline void irdma_mcast_mac_v6(u32 *ip_addr, u8 *mac)
struct rdma_user_mmap_entry*
irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
enum irdma_mmap_flag mmap_flag, u64 *mmap_offset);
+struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
+ struct ib_pd *pd, u64 virt,
+ enum irdma_memreg_type reg_type);
+void irdma_free_iwmr(struct irdma_mr *iwmr);
+int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access,
+ bool create_stag);
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
@@ -392,5 +395,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp);
void irdma_remove_cmpls_list(struct irdma_cq *iwcq);
int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info);
void irdma_sched_qp_flush_work(struct irdma_qp *iwqp);
-void irdma_flush_worker(struct work_struct *work);
+void irdma_kern_flush_worker(struct work_struct *work);
+void irdma_user_flush_worker(struct work_struct *work);
+int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr);
#endif /* IRDMA_VERBS_H */
diff --git a/sys/dev/irdma/irdma_ws.c b/sys/dev/irdma/irdma_ws.c
index d311343111b9..af781f23a22c 100644
--- a/sys/dev/irdma/irdma_ws.c
+++ b/sys/dev/irdma/irdma_ws.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2017 - 2023 Intel Corporation
+ * Copyright (c) 2017 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -118,10 +118,11 @@ irdma_free_node(struct irdma_sc_vsi *vsi,
* @vsi: vsi pointer
* @node: pointer to node
* @cmd: add, remove or modify
+ * @qs_handle: Pointer to store the qs_handle for a leaf node
*/
static int
irdma_ws_cqp_cmd(struct irdma_sc_vsi *vsi,
- struct irdma_ws_node *node, u8 cmd)
+ struct irdma_ws_node *node, u8 cmd, u16 *qs_handle)
{
struct irdma_ws_node_info node_info = {0};
@@ -142,10 +143,8 @@ irdma_ws_cqp_cmd(struct irdma_sc_vsi *vsi,
return -ENOMEM;
}
- if (node->type_leaf && cmd == IRDMA_OP_WS_ADD_NODE) {
- node->qs_handle = node_info.qs_handle;
- vsi->qos[node->user_pri].qs_handle = node_info.qs_handle;
- }
+ if (node->type_leaf && cmd == IRDMA_OP_WS_ADD_NODE && qs_handle)
+ *qs_handle = node_info.qs_handle;
return 0;
}
@@ -193,11 +192,8 @@ irdma_ws_in_use(struct irdma_sc_vsi *vsi, u8 user_pri)
{
int i;
- mutex_lock(&vsi->qos[user_pri].qos_mutex);
- if (!list_empty(&vsi->qos[user_pri].qplist)) {
- mutex_unlock(&vsi->qos[user_pri].qos_mutex);
+ if (!list_empty(&vsi->qos[user_pri].qplist))
return true;
- }
/*
* Check if the qs handle associated with the given user priority is in use by any other user priority. If so,
@@ -205,12 +201,9 @@ irdma_ws_in_use(struct irdma_sc_vsi *vsi, u8 user_pri)
*/
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
if (vsi->qos[i].qs_handle == vsi->qos[user_pri].qs_handle &&
- !list_empty(&vsi->qos[i].qplist)) {
- mutex_unlock(&vsi->qos[user_pri].qos_mutex);
+ !list_empty(&vsi->qos[i].qplist))
return true;
- }
}
- mutex_unlock(&vsi->qos[user_pri].qos_mutex);
return false;
}
@@ -228,9 +221,10 @@ irdma_remove_leaf(struct irdma_sc_vsi *vsi, u8 user_pri)
int i;
qs_handle = vsi->qos[user_pri].qs_handle;
- for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
if (vsi->qos[i].qs_handle == qs_handle)
vsi->qos[i].valid = false;
+ }
ws_tree_root = vsi->dev->ws_tree_root;
if (!ws_tree_root)
@@ -247,25 +241,93 @@ irdma_remove_leaf(struct irdma_sc_vsi *vsi, u8 user_pri)
if (!tc_node)
return;
- irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE);
- vsi->unregister_qset(vsi, tc_node);
list_del(&tc_node->siblings);
+ irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE, NULL);
+
+ vsi->unregister_qset(vsi, tc_node);
irdma_free_node(vsi, tc_node);
/* Check if VSI node can be freed */
if (list_empty(&vsi_node->child_list_head)) {
- irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE);
+ irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE, NULL);
list_del(&vsi_node->siblings);
irdma_free_node(vsi, vsi_node);
/* Free head node there are no remaining VSI nodes */
if (list_empty(&ws_tree_root->child_list_head)) {
irdma_ws_cqp_cmd(vsi, ws_tree_root,
- IRDMA_OP_WS_DELETE_NODE);
+ IRDMA_OP_WS_DELETE_NODE, NULL);
irdma_free_node(vsi, ws_tree_root);
vsi->dev->ws_tree_root = NULL;
}
}
}
+static int
+irdma_enable_leaf(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node)
+{
+ int ret;
+
+ ret = vsi->register_qset(vsi, tc_node);
+ if (ret)
+ return ret;
+
+ tc_node->enable = true;
+ ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_MODIFY_NODE, NULL);
+ if (ret)
+ goto enable_err;
+ return 0;
+
+enable_err:
+ vsi->unregister_qset(vsi, tc_node);
+
+ return ret;
+}
+
+static struct irdma_ws_node *
+irdma_add_leaf_node(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *vsi_node,
+ u8 user_pri, u16 traffic_class)
+{
+ struct irdma_ws_node *tc_node =
+ irdma_alloc_node(vsi, user_pri, WS_NODE_TYPE_LEAF, vsi_node);
+ int i, ret = 0;
+
+ if (!tc_node)
+ return NULL;
+ ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_ADD_NODE, &tc_node->qs_handle);
+ if (ret) {
+ irdma_free_node(vsi, tc_node);
+ return NULL;
+ }
+ vsi->qos[tc_node->user_pri].qs_handle = tc_node->qs_handle;
+
+ list_add(&tc_node->siblings, &vsi_node->child_list_head);
+
+ ret = irdma_enable_leaf(vsi, tc_node);
+ if (ret)
+ goto reg_err;
+
+ /*
+ * Iterate through other UPs and update the QS handle if they have a matching traffic class.
+ */
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ if (vsi->qos[i].traffic_class == traffic_class) {
+ vsi->qos[i].qs_handle = tc_node->qs_handle;
+ vsi->qos[i].l2_sched_node_id =
+ tc_node->l2_sched_node_id;
+ vsi->qos[i].valid = true;
+ }
+ }
+ return tc_node;
+
+reg_err:
+ irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE, NULL);
+ list_del(&tc_node->siblings);
+ irdma_free_node(vsi, tc_node);
+
+ return NULL;
+}
+
/**
* irdma_ws_add - Build work scheduler tree, set RDMA qs_handle
* @vsi: vsi pointer
@@ -279,7 +341,6 @@ irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
struct irdma_ws_node *tc_node;
u16 traffic_class;
int ret = 0;
- int i;
mutex_lock(&vsi->dev->ws_mutex);
if (vsi->tc_change_pending) {
@@ -298,9 +359,11 @@ irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
ret = -ENOMEM;
goto exit;
}
- irdma_debug(vsi->dev, IRDMA_DEBUG_WS, "Creating root node = %d\n", ws_tree_root->index);
+ irdma_debug(vsi->dev, IRDMA_DEBUG_WS,
+ "Creating root node = %d\n", ws_tree_root->index);
- ret = irdma_ws_cqp_cmd(vsi, ws_tree_root, IRDMA_OP_WS_ADD_NODE);
+ ret = irdma_ws_cqp_cmd(vsi, ws_tree_root, IRDMA_OP_WS_ADD_NODE,
+ NULL);
if (ret) {
irdma_free_node(vsi, ws_tree_root);
goto exit;
@@ -324,7 +387,8 @@ irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
goto vsi_add_err;
}
- ret = irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_ADD_NODE);
+ ret = irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_ADD_NODE,
+ NULL);
if (ret) {
irdma_free_node(vsi, vsi_node);
goto vsi_add_err;
@@ -344,56 +408,22 @@ irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
irdma_debug(vsi->dev, IRDMA_DEBUG_WS,
"Node not found matching VSI %d and TC %d\n",
vsi->vsi_idx, traffic_class);
- tc_node = irdma_alloc_node(vsi, user_pri, WS_NODE_TYPE_LEAF,
- vsi_node);
+ tc_node = irdma_add_leaf_node(vsi, vsi_node, user_pri,
+ traffic_class);
if (!tc_node) {
ret = -ENOMEM;
goto leaf_add_err;
}
-
- ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_ADD_NODE);
- if (ret) {
- irdma_free_node(vsi, tc_node);
- goto leaf_add_err;
- }
-
- list_add(&tc_node->siblings, &vsi_node->child_list_head);
- /*
- * callback to LAN to update the LAN tree with our node
- */
- ret = vsi->register_qset(vsi, tc_node);
- if (ret)
- goto reg_err;
-
- tc_node->enable = true;
- ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_MODIFY_NODE);
- if (ret) {
- vsi->unregister_qset(vsi, tc_node);
- goto reg_err;
- }
}
irdma_debug(vsi->dev, IRDMA_DEBUG_WS,
"Using node %d which represents VSI %d TC %d\n",
tc_node->index, vsi->vsi_idx, traffic_class);
- /*
- * Iterate through other UPs and update the QS handle if they have a matching traffic class.
- */
- for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
- if (vsi->qos[i].traffic_class == traffic_class) {
- vsi->qos[i].qs_handle = tc_node->qs_handle;
- vsi->qos[i].l2_sched_node_id = tc_node->l2_sched_node_id;
- vsi->qos[i].valid = true;
- }
- }
goto exit;
-reg_err:
- irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE);
- list_del(&tc_node->siblings);
- irdma_free_node(vsi, tc_node);
leaf_add_err:
if (list_empty(&vsi_node->child_list_head)) {
- if (irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE))
+ if (irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE,
+ NULL))
goto exit;
list_del(&vsi_node->siblings);
irdma_free_node(vsi, vsi_node);
@@ -402,7 +432,8 @@ leaf_add_err:
vsi_add_err:
/* Free head node there are no remaining VSI nodes */
if (list_empty(&ws_tree_root->child_list_head)) {
- irdma_ws_cqp_cmd(vsi, ws_tree_root, IRDMA_OP_WS_DELETE_NODE);
+ irdma_ws_cqp_cmd(vsi, ws_tree_root, IRDMA_OP_WS_DELETE_NODE,
+ NULL);
vsi->dev->ws_tree_root = NULL;
irdma_free_node(vsi, ws_tree_root);
}
@@ -420,12 +451,14 @@ exit:
void
irdma_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri)
{
+ mutex_lock(&vsi->qos[user_pri].qos_mutex);
mutex_lock(&vsi->dev->ws_mutex);
if (irdma_ws_in_use(vsi, user_pri))
goto exit;
irdma_remove_leaf(vsi, user_pri);
exit:
mutex_unlock(&vsi->dev->ws_mutex);
+ mutex_unlock(&vsi->qos[user_pri].qos_mutex);
}
/**
diff --git a/sys/dev/irdma/osdep.h b/sys/dev/irdma/osdep.h
index 831bd50f3ae4..eb73ffbd30e2 100644
--- a/sys/dev/irdma/osdep.h
+++ b/sys/dev/irdma/osdep.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2021 - 2023 Intel Corporation
+ * Copyright (c) 2021 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -85,13 +85,18 @@
#define STATS_TIMER_DELAY 60000
/* a couple of linux size defines */
-#define SZ_128 128
+#define SZ_128 128
#define SPEED_1000 1000
+#define SPEED_2500 2500
+#define SPEED_5000 5000
#define SPEED_10000 10000
+#define SPEED_14000 14000
#define SPEED_20000 20000
#define SPEED_25000 25000
#define SPEED_40000 40000
+#define SPEED_50000 50000
#define SPEED_100000 100000
+#define SPEED_200000 200000
#define irdma_mb() mb()
#define irdma_wmb() wmb()
diff --git a/sys/dev/isci/scil/scic_sds_phy_registers.h b/sys/dev/isci/scil/scic_sds_phy_registers.h
index c0467325eb3b..29b04133d19b 100644
--- a/sys/dev/isci/scil/scic_sds_phy_registers.h
+++ b/sys/dev/isci/scil/scic_sds_phy_registers.h
@@ -137,7 +137,7 @@ extern "C" {
//*****************************************************************************
/**
- * THis macro requests the SCU register write for the specified link layer
+ * This macro requests the SCU register write for the specified link layer
* register.
*/
#define scu_link_layer_register_read(phy, reg) \
diff --git a/sys/dev/isci/scil/scic_sds_remote_node_table.c b/sys/dev/isci/scil/scic_sds_remote_node_table.c
index a29cbefa5057..e447d5d952af 100644
--- a/sys/dev/isci/scil/scic_sds_remote_node_table.c
+++ b/sys/dev/isci/scil/scic_sds_remote_node_table.c
@@ -276,7 +276,7 @@ void scic_sds_remote_node_table_clear_group(
}
/**
- * THis method sets an entire remote node group in the remote node table.
+ * This method sets an entire remote node group in the remote node table.
*
* @param[in] remote_node_table
* @param[in] group_index
@@ -471,7 +471,7 @@ U16 scic_sds_remote_node_table_allocate_single_remote_node(
*
* @param[in] remote_node_table This is the remote node table from which to
* allocate the remote node entries.
- * @param[in] group_table_index THis is the group table index which must equal
+ * @param[in] group_table_index This is the group table index which must equal
* two (2) for this operation.
*
* @return The remote node index that represents three consecutive remote node
diff --git a/sys/dev/isci/scil/scic_sds_stp_request.h b/sys/dev/isci/scil/scic_sds_stp_request.h
index e8a64868a3b1..8cf60c8dd1b3 100644
--- a/sys/dev/isci/scil/scic_sds_stp_request.h
+++ b/sys/dev/isci/scil/scic_sds_stp_request.h
@@ -175,7 +175,7 @@ enum SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_SUBSTATES
/**
* @enum SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_SUBSTATES
*
- * @brief THis enumeration depicts the various sub-states associated with a
+ * @brief This enumeration depicts the various sub-states associated with a
* SATA/STP soft reset operation.
*/
enum SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_SUBSTATES
diff --git a/sys/dev/isci/scil/scif_remote_device.h b/sys/dev/isci/scil/scif_remote_device.h
index b6ac032c8234..4c21654ce42a 100644
--- a/sys/dev/isci/scil/scif_remote_device.h
+++ b/sys/dev/isci/scil/scif_remote_device.h
@@ -240,7 +240,7 @@ SCI_REMOTE_DEVICE_HANDLE_T scif_remote_device_get_scic_handle(
/**
* @brief This method returns the maximum queue depth supported for the
- * supplied target by this SCI Framework impementation.
+ * supplied target by this SCI Framework implementation.
*
* @param[in] remote_device This parameter specifies the framework
* device for which to return the maximum queue depth.
diff --git a/sys/dev/ixgbe/if_ix.c b/sys/dev/ixgbe/if_ix.c
index 1d36fd11f368..b9d88fcab523 100644
--- a/sys/dev/ixgbe/if_ix.c
+++ b/sys/dev/ixgbe/if_ix.c
@@ -36,10 +36,14 @@
#include "opt_rss.h"
#include "ixgbe.h"
+#include "mdio_if.h"
#include "ixgbe_sriov.h"
#include "ifdi_if.h"
+#include "if_ix_mdio_hw.h"
+#include "if_ix_mdio.h"
#include <net/netmap.h>
+#include <dev/mdio/mdio.h>
#include <dev/netmap/netmap_kern.h>
/************************************************************************
@@ -298,6 +302,10 @@ static device_method_t ix_methods[] = {
DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif /* PCI_IOV */
+ DEVMETHOD(bus_add_child, device_add_child_ordered),
+ DEVMETHOD(mdio_readreg, ixgbe_mdio_readreg_c22),
+ DEVMETHOD(mdio_writereg, ixgbe_mdio_writereg_c22),
+
DEVMETHOD_END
};
@@ -305,11 +313,13 @@ static driver_t ix_driver = {
"ix", ix_methods, sizeof(struct ixgbe_softc),
};
-DRIVER_MODULE(ix, pci, ix_driver, 0, 0);
+DRIVER_MODULE(mdio, ix, mdio_driver, 0, 0); /* needs to happen before ix */
+DRIVER_MODULE_ORDERED(ix, pci, ix_driver, NULL, NULL, SI_ORDER_ANY); /* needs to be last */
IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
MODULE_DEPEND(ix, iflib, 1, 1, 1);
+MODULE_DEPEND(ix, mdio, 1, 1, 1);
static device_method_t ixgbe_if_methods[] = {
DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
@@ -709,7 +719,7 @@ ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
RSS_HASHTYPE_RSS_TCP_IPV6_EX;
}
- mrqc = IXGBE_MRQC_RSSEN;
+ mrqc = ixgbe_get_mrqc(sc->iov_mode);
if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
@@ -728,7 +738,7 @@ ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
- mrqc |= ixgbe_get_mrqc(sc->iov_mode);
+
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
@@ -1066,6 +1076,10 @@ ixgbe_if_attach_pre(if_ctx_t ctx)
/* Ensure SW/FW semaphore is free */
ixgbe_init_swfw_semaphore(hw);
+ /* Enable EEE power saving */
+ if (sc->feat_en & IXGBE_FEATURE_EEE)
+ hw->mac.ops.setup_eee(hw, true);
+
/* Set an initial default flow control value */
hw->fc.requested_mode = ixgbe_flow_control;
@@ -1118,10 +1132,13 @@ ixgbe_if_attach_pre(if_ctx_t ctx)
break;
}
- /* Check the FW API version */
- if (hw->mac.type == ixgbe_mac_E610 && ixgbe_check_fw_api_version(sc)) {
- error = EIO;
- goto err_pci;
+ /* Check the FW API version and enable FW logging support for E610 */
+ if (hw->mac.type == ixgbe_mac_E610) {
+ if (ixgbe_check_fw_api_version(sc)) {
+ error = EIO;
+ goto err_pci;
+ }
+ ixgbe_fwlog_set_support_ena(hw);
}
/* Most of the iflib initialization... */
@@ -1267,6 +1284,9 @@ ixgbe_if_attach_post(if_ctx_t ctx)
/* Add sysctls */
ixgbe_add_device_sysctls(ctx);
+ /* Add MDIO bus if required / supported */
+ ixgbe_mdio_attach(sc);
+
/* Init recovery mode timer and state variable */
if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
sc->recovery_mode = 0;
@@ -3395,6 +3415,9 @@ ixgbe_add_debug_sysctls(struct ixgbe_softc *sc)
if (sc->feat_en & IXGBE_FEATURE_DBG_DUMP)
ixgbe_add_debug_dump_sysctls(sc);
+
+ if (sc->feat_en & IXGBE_FEATURE_FW_LOGGING)
+ ixgbe_add_fw_logging_tunables(sc, sc->debug_sysctls);
} /* ixgbe_add_debug_sysctls */
/************************************************************************
@@ -4491,6 +4514,10 @@ ixgbe_handle_fw_event(void *context)
sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
break;
+ case ixgbe_aci_opc_fw_logs_event:
+ ixgbe_fwlog_event_dump(&sc->hw, &event.desc, event.msg_buf);
+ break;
+
case ixgbe_aci_opc_temp_tca_event:
if (hw->adapter_stopped == FALSE)
ixgbe_if_stop(ctx);
@@ -4589,6 +4616,20 @@ ixgbe_if_update_admin_status(if_ctx_t ctx)
"Link is up %s Full Duplex\n",
ixgbe_link_speed_to_str(sc->link_speed));
sc->link_active = true;
+
+ /* If link speed is <= 1Gbps and EEE is enabled,
+ * log info.
+ */
+ if (sc->hw.mac.type == ixgbe_mac_E610 &&
+ (sc->feat_en & IXGBE_FEATURE_EEE) &&
+ sc->link_speed <= IXGBE_LINK_SPEED_1GB_FULL) {
+ device_printf(sc->dev,
+ "Energy Efficient Ethernet (EEE) feature "
+ "is not supported on link speeds equal to "
+ "or below 1Gbps. EEE is supported on "
+ "speeds above 1Gbps.\n");
+ }
+
/* Update any Flow Control changes */
ixgbe_fc_enable(&sc->hw);
/* Update DMA coalescing config */
@@ -5582,6 +5623,17 @@ ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
if ((new_eee < 0) || (new_eee > 1))
return (EINVAL);
+ /* If link speed is <= 1Gbps and EEE is being enabled, log info */
+ if (sc->hw.mac.type == ixgbe_mac_E610 &&
+ new_eee &&
+ sc->link_speed <= IXGBE_LINK_SPEED_1GB_FULL) {
+ device_printf(dev,
+ "Energy Efficient Ethernet (EEE) feature is not "
+ "supported on link speeds equal to or below 1Gbps. "
+ "EEE is supported on speeds above 1Gbps.\n");
+ return (EINVAL);
+ }
+
retval = ixgbe_setup_eee(&sc->hw, new_eee);
if (retval) {
device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
@@ -5645,6 +5697,8 @@ ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS)
static void
ixgbe_init_device_features(struct ixgbe_softc *sc)
{
+ s32 error;
+
sc->feat_cap = IXGBE_FEATURE_NETMAP |
IXGBE_FEATURE_RSS |
IXGBE_FEATURE_MSI |
@@ -5700,6 +5754,10 @@ ixgbe_init_device_features(struct ixgbe_softc *sc)
case ixgbe_mac_E610:
sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
sc->feat_cap |= IXGBE_FEATURE_DBG_DUMP;
+ sc->feat_cap |= IXGBE_FEATURE_FW_LOGGING;
+ error = ixgbe_get_caps(&sc->hw);
+ if (error == 0 && sc->hw.func_caps.common_cap.eee_support != 0)
+ sc->feat_cap |= IXGBE_FEATURE_EEE;
break;
default:
break;
@@ -5724,6 +5782,9 @@ ixgbe_init_device_features(struct ixgbe_softc *sc)
/* FW Debug Dump */
if (sc->feat_cap & IXGBE_FEATURE_DBG_DUMP)
sc->feat_en |= IXGBE_FEATURE_DBG_DUMP;
+ /* FW Logging */
+ if (sc->feat_cap & IXGBE_FEATURE_FW_LOGGING)
+ sc->feat_en |= IXGBE_FEATURE_FW_LOGGING;
/* Enabled via global sysctl... */
/* Flow Director */
diff --git a/sys/dev/ixgbe/if_ix_mdio.c b/sys/dev/ixgbe/if_ix_mdio.c
new file mode 100644
index 000000000000..3aa7ea80c3a7
--- /dev/null
+++ b/sys/dev/ixgbe/if_ix_mdio.c
@@ -0,0 +1,158 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 Adrian Chadd <adrian@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#include "opt_rss.h"
+
+#include "ixgbe.h"
+#include "mdio_if.h"
+#include "ixgbe_sriov.h"
+#include "ifdi_if.h"
+#include "if_ix_mdio_hw.h"
+#include "if_ix_mdio.h"
+
+#include <dev/mdio/mdio.h>
+
+/**
+ * @brief Return if the given ixgbe chipset supports clause 22 MDIO bus access.
+ *
+ * Although technically all of the ixgbe chipsets support an MDIO
+ * bus interface, there's a bunch of factors controlling whether
+ * this should be exposed for external control.
+ *
+ * This function returns true if it supports an MDIO bus and
+ * clause 22 transactions.
+ */
+static bool
+ixgbe_has_mdio_bus_clause22(struct ixgbe_hw *hw)
+{
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ case IXGBE_DEV_ID_X550EM_A_SGMII:
+ case IXGBE_DEV_ID_X550EM_A_SGMII_L:
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ return (true);
+ }
+ return (false);
+}
+
+
+
+/**
+ * @brief Initiate a clause-22 MDIO read transfer.
+ *
+ * Note this is only officially supported for a small subset
+ * of NICs, notably the X552/X553 devices. This must not be
+ * called for other chipsets.
+ */
+int
+ixgbe_mdio_readreg_c22(device_t dev, int phy, int reg)
+{
+ if_ctx_t ctx = device_get_softc(dev);
+ struct sx *iflib_ctx_lock = iflib_ctx_lock_get(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_hw *hw = &sc->hw;
+ uint16_t val = 0;
+ int32_t ret = 0;
+
+ if (! ixgbe_has_mdio_bus_clause22(hw))
+ return (-1);
+
+ sx_xlock(iflib_ctx_lock);
+ ret = ixgbe_read_mdio_c22(hw, phy, reg, &val);
+ if (ret != IXGBE_SUCCESS) {
+ device_printf(dev, "%s: read_mdi_22 failed (%d)\n",
+ __func__, ret);
+ sx_xunlock(iflib_ctx_lock);
+ return (-1);
+ }
+ sx_xunlock(iflib_ctx_lock);
+ return (val);
+}
+
+/**
+ * @brief Initiate a clause-22 MDIO write transfer.
+ *
+ * Note this is only officially supported for a small subset
+ * of NICs, notably the X552/X553 devices. This must not be
+ * called for other chipsets.
+ */
+int
+ixgbe_mdio_writereg_c22(device_t dev, int phy, int reg, int data)
+{
+ if_ctx_t ctx = device_get_softc(dev);
+ struct sx *iflib_ctx_lock = iflib_ctx_lock_get(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_hw *hw = &sc->hw;
+ int32_t ret;
+
+ if (! ixgbe_has_mdio_bus_clause22(hw))
+ return (-1);
+
+ sx_xlock(iflib_ctx_lock);
+ ret = ixgbe_write_mdio_c22(hw, phy, reg, data);
+ if (ret != IXGBE_SUCCESS) {
+ device_printf(dev, "%s: write_mdi_22 failed (%d)\n",
+ __func__, ret);
+ sx_xunlock(iflib_ctx_lock);
+ return (-1);
+ }
+ sx_xunlock(iflib_ctx_lock);
+ return (0);
+}
+
+/**
+ * @brief Attach the MDIO bus if one exists.
+ */
+void
+ixgbe_mdio_attach(struct ixgbe_softc *sc)
+{
+ struct ixgbe_hw *hw = &sc->hw;
+ int enable_mdio = 0;
+
+ /*
+ * This explicitly needs to be enabled regardless of whether
+ * the device / instance supports an external MDIO bus.
+ */
+ if (resource_int_value(device_get_name(sc->dev),
+ device_get_unit(sc->dev), "enable_mdio", &enable_mdio) == 0) {
+ if (enable_mdio == 0)
+ return;
+ } else
+ return;
+
+ if (! ixgbe_has_mdio_bus_clause22(hw))
+ return;
+
+ device_add_child(sc->dev, "mdio", DEVICE_UNIT_ANY);
+ bus_attach_children(sc->dev);
+}
diff --git a/sys/dev/ixgbe/if_ix_mdio.h b/sys/dev/ixgbe/if_ix_mdio.h
new file mode 100644
index 000000000000..f9fe99275b2b
--- /dev/null
+++ b/sys/dev/ixgbe/if_ix_mdio.h
@@ -0,0 +1,34 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 Adrian Chadd <adrian@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IF_IX_MDIO_H_
+#define _IF_IX_MDIO_H_
+
+int ixgbe_mdio_readreg_c22(device_t, int, int);
+int ixgbe_mdio_writereg_c22(device_t, int, int, int);
+void ixgbe_mdio_attach(struct ixgbe_softc *);
+
+#endif /* _IF_IX_MDIO_H_ */
diff --git a/sys/dev/ixgbe/if_ix_mdio_hw.c b/sys/dev/ixgbe/if_ix_mdio_hw.c
new file mode 100644
index 000000000000..581ed09f27e3
--- /dev/null
+++ b/sys/dev/ixgbe/if_ix_mdio_hw.c
@@ -0,0 +1,181 @@
+/******************************************************************************
+ SPDX-License-Identifier: BSD-3-Clause
+
+ Copyright (c) 2001-2024, Intel Corporation
+ All rights reserved.
+ Copyright (c) 2026 Adrian Chadd <adrian@FreeBSD.org>
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+#include "if_ix_mdio_hw.h"
+
+/*
+ * These routines are separate from the rest of ixgbe for now to make merging
+ * easier.
+ */
+
+static s32
+ixgbe_read_mdio_unlocked_c22(struct ixgbe_hw *hw, u16 phy, u16 reg, u16 *phy_data)
+{
+ u32 i, data, command;
+
+ /* Setup and write the read command */
+ command = (reg << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (phy << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC |
+ IXGBE_MSCA_MDI_COMMAND;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /* Check every 10 usec to see if the access completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if (!(command & IXGBE_MSCA_MDI_COMMAND))
+ break;
+ }
+
+ if (command & IXGBE_MSCA_MDI_COMMAND)
+ return IXGBE_ERR_PHY;
+
+ /* Read operation is complete. Get the data from MSRWD */
+ data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+ data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
+ *phy_data = (u16)data;
+
+ return IXGBE_SUCCESS;
+}
+
+static s32
+ixgbe_write_mdio_unlocked_c22(struct ixgbe_hw *hw, u16 phy, u16 reg, u16 phy_data)
+{
+ u32 i, command;
+
+ /* Put the data in the MDI single read and write data register */
+ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
+
+ /* Setup and write the write command */
+ command = (reg << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (phy << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
+ IXGBE_MSCA_MDI_COMMAND;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /* Check every 10 usec to see if the access completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if (!(command & IXGBE_MSCA_MDI_COMMAND))
+ break;
+ }
+
+ if (command & IXGBE_MSCA_MDI_COMMAND)
+ return IXGBE_ERR_PHY;
+
+ return IXGBE_SUCCESS;
+}
+
+/*
+ * Return true if the MAC is an X55x backplane.
+ *
+ * These have a single MDIO PHY semaphore (PHY0) and also require the
+ * token semaphore.
+ */
+static bool
+ixgbe_check_mdio_is_x550em(struct ixgbe_hw *hw)
+{
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ case IXGBE_DEV_ID_X550EM_A_SGMII:
+ case IXGBE_DEV_ID_X550EM_A_SGMII_L:
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ return true;
+ default:
+ return false;
+ }
+}
+
+s32
+ixgbe_read_mdio_c22(struct ixgbe_hw *hw, u16 phy, u16 reg, u16 *phy_data)
+{
+ u32 gssr = hw->phy.phy_semaphore_mask;
+ s32 ret;
+
+ if (ixgbe_check_mdio_is_x550em(hw))
+ gssr |= IXGBE_GSSR_PHY0_SM | IXGBE_GSSR_TOKEN_SM;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr)) {
+ *phy_data = -1;
+ return IXGBE_ERR_TIMEOUT;
+ }
+
+ ret = ixgbe_read_mdio_unlocked_c22(hw, phy, reg, phy_data);
+ if (ret != IXGBE_SUCCESS)
+ *phy_data = -1;
+
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ return ret;
+}
+
+s32
+ixgbe_write_mdio_c22(struct ixgbe_hw *hw, u16 phy, u16 reg, u16 data)
+{
+ u32 gssr = hw->phy.phy_semaphore_mask;
+ s32 ret;
+
+ if (ixgbe_check_mdio_is_x550em(hw))
+ gssr |= IXGBE_GSSR_PHY0_SM | IXGBE_GSSR_TOKEN_SM;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
+ return IXGBE_ERR_TIMEOUT;
+
+ ret = ixgbe_write_mdio_unlocked_c22(hw, phy, reg, data);
+
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ return ret;
+}
diff --git a/sys/dev/ixgbe/if_ix_mdio_hw.h b/sys/dev/ixgbe/if_ix_mdio_hw.h
new file mode 100644
index 000000000000..b2db5d431819
--- /dev/null
+++ b/sys/dev/ixgbe/if_ix_mdio_hw.h
@@ -0,0 +1,33 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2026 Adrian Chadd <adrian@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IF_IX_MDIO_HW_H_
+#define _IF_IX_MDIO_HW_H_
+
+s32 ixgbe_read_mdio_c22(struct ixgbe_hw *hw, u16 phy, u16 reg, u16 *phy_data);
+s32 ixgbe_write_mdio_c22(struct ixgbe_hw *hw, u16 phy, u16 reg, u16 data);
+
+#endif /* _IF_IX_MDIO_HW_H_ */
diff --git a/sys/dev/ixgbe/if_sriov.c b/sys/dev/ixgbe/if_sriov.c
index 1998cdb016f7..47f1a1279e2f 100644
--- a/sys/dev/ixgbe/if_sriov.c
+++ b/sys/dev/ixgbe/if_sriov.c
@@ -170,7 +170,7 @@ ixgbe_get_mrqc(int iov_mode)
mrqc = IXGBE_MRQC_VMDQRSS32EN;
break;
case IXGBE_NO_VM:
- mrqc = 0;
+ mrqc = IXGBE_MRQC_RSSEN;
break;
default:
panic("Unexpected SR-IOV mode %d", iov_mode);
diff --git a/sys/dev/ixgbe/ixgbe.h b/sys/dev/ixgbe/ixgbe.h
index 624b71acabea..9120ca5a37ff 100644
--- a/sys/dev/ixgbe/ixgbe.h
+++ b/sys/dev/ixgbe/ixgbe.h
@@ -607,6 +607,11 @@ int ixgbe_setup_receive_structures(struct ixgbe_softc *);
void ixgbe_free_receive_structures(struct ixgbe_softc *);
int ixgbe_get_regs(SYSCTL_HANDLER_ARGS);
+void ixgbe_add_fw_logging_tunables(struct ixgbe_softc *sc,
+ struct sysctl_oid *parent);
+
+#define IXGBE_STR_BUF_LEN 32
+
#include "ixgbe_bypass.h"
#include "ixgbe_fdir.h"
#include "ixgbe_rss.h"
diff --git a/sys/dev/ixgbe/ixgbe_common.c b/sys/dev/ixgbe/ixgbe_common.c
index bff022585a03..9e827d2e5473 100644
--- a/sys/dev/ixgbe/ixgbe_common.c
+++ b/sys/dev/ixgbe/ixgbe_common.c
@@ -4631,11 +4631,11 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
/* Setting this bit tells the ARC that a new command is pending. */
IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
- for (i = 0; i < timeout; i++) {
+ for (i = 0; i < timeout * 1000; i++) {
hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
if (!(hicr & IXGBE_HICR_C))
break;
- msec_delay(1);
+ usec_delay(1);
}
/* For each command except "Apply Update" perform
diff --git a/sys/dev/ixgbe/ixgbe_e610.c b/sys/dev/ixgbe/ixgbe_e610.c
index 18c4612446e0..21066f95a16e 100644
--- a/sys/dev/ixgbe/ixgbe_e610.c
+++ b/sys/dev/ixgbe/ixgbe_e610.c
@@ -776,6 +776,10 @@ ixgbe_parse_common_caps(struct ixgbe_hw *hw, struct ixgbe_hw_common_caps *caps,
DEBUGOUT2("%s: next_cluster_id_support = %d\n",
prefix, caps->next_cluster_id_support);
break;
+ case IXGBE_ACI_CAPS_EEE:
+ caps->eee_support = (u8)number;
+ DEBUGOUT2("%s: eee_support = %x\n", prefix, caps->eee_support);
+ break;
default:
/* Not one of the recognized common capabilities */
found = false;
@@ -1332,6 +1336,7 @@ void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
cfg->link_fec_opt = caps->link_fec_options;
cfg->module_compliance_enforcement =
caps->module_compliance_enforcement;
+ cfg->eee_entry_delay = caps->eee_entry_delay;
}
/**
@@ -1351,10 +1356,12 @@ s32 ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
{
struct ixgbe_aci_desc desc;
+ bool use_1p40_buff;
s32 status;
if (!cfg)
return IXGBE_ERR_PARAM;
+ use_1p40_buff = hw->func_caps.common_cap.eee_support != 0;
/* Ensure that only valid bits of cfg->caps can be turned on. */
if (cfg->caps & ~IXGBE_ACI_PHY_ENA_VALID_MASK) {
@@ -1364,8 +1371,18 @@ s32 ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg);
desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
- status = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg));
+ if (use_1p40_buff) {
+ status = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg));
+ } else {
+ struct ixgbe_aci_cmd_set_phy_cfg_data_pre_1_40 cfg_obsolete;
+
+ memcpy(&cfg_obsolete, cfg, sizeof(cfg_obsolete));
+
+ status = ixgbe_aci_send_cmd(hw, &desc, &cfg_obsolete,
+ sizeof(cfg_obsolete));
+ }
+ /* The pre-1.40 layout is a prefix of *cfg, so cache cfg unchanged. */
if (!status)
hw->phy.curr_user_phy_cfg = *cfg;
@@ -1599,6 +1616,7 @@ s32 ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
li->topo_media_conflict = link_data.topo_media_conflict;
li->pacing = link_data.cfg & (IXGBE_ACI_CFG_PACING_M |
IXGBE_ACI_CFG_PACING_TYPE_M);
+ li->eee_status = link_data.eee_status;
/* update fc info */
tx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_TX);
@@ -3812,6 +3830,492 @@ s32 ixgbe_handle_nvm_access(struct ixgbe_hw *hw,
}
/**
+ * ixgbe_fwlog_cache_cfg - Cache FW logging config
+ * @hw: pointer to the HW structure
+ * @cfg: config to cache
+ *
+ * Cache FW logging config.
+ */
+static void ixgbe_fwlog_cache_cfg(struct ixgbe_hw *hw,
+ struct ixgbe_fwlog_cfg *cfg)
+{
+ hw->fwlog_cfg = *cfg;
+}
+
+/**
+ * ixgbe_fwlog_valid_module_entries - validate all the module entry IDs and
+ * log levels
+ * @hw: pointer to the HW structure
+ * @entries: entries to validate
+ * @num_entries: number of entries to validate
+ *
+ * Checks if all the module entry IDs and log levels are valid.
+ *
+ * Return: true if all the module entry IDs and log levels are valid,
+ * otherwise false.
+ */
+static bool ixgbe_fwlog_valid_module_entries(struct ixgbe_hw *hw,
+ struct ixgbe_fwlog_module_entry *entries,
+ u16 num_entries)
+{
+ u16 i;
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ if (!entries) {
+ return false;
+ }
+
+ if (!num_entries) {
+ return false;
+ }
+
+ for (i = 0; i < num_entries; i++) {
+ struct ixgbe_fwlog_module_entry *entry = &entries[i];
+
+ if (entry->module_id >= IXGBE_ACI_FW_LOG_ID_MAX) {
+ return false;
+ }
+
+ if (entry->log_level >= IXGBE_FWLOG_LEVEL_INVALID) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * ixgbe_fwlog_valid_cfg - validate configuration
+ * @hw: pointer to the HW structure
+ * @cfg: config to validate
+ *
+ * Validate the entire configuration.
+ *
+ * Return: true if the entire configuration is valid, otherwise false.
+ */
+static bool ixgbe_fwlog_valid_cfg(struct ixgbe_hw *hw,
+ struct ixgbe_fwlog_cfg *cfg)
+{
+ if (!cfg) {
+ return false;
+ }
+
+ if (cfg->log_resolution < IXGBE_ACI_FW_LOG_MIN_RESOLUTION ||
+ cfg->log_resolution > IXGBE_ACI_FW_LOG_MAX_RESOLUTION) {
+ return false;
+ }
+
+ if (!ixgbe_fwlog_valid_module_entries(hw, cfg->module_entries,
+ IXGBE_ACI_FW_LOG_ID_MAX))
+ return false;
+
+ return true;
+}
+
+/**
+ * ixgbe_fwlog_init - Initialize cached structures for tracking FW logging
+ * @hw: pointer to the HW structure
+ * @cfg: config used to initialize the cached structures
+ *
+ * Initialize cached structures for tracking FW logging
+ * Called on driver initialization and before calling
+ * ixgbe_init_hw(). Firmware logging will be configured based on these settings
+ * and also the PF will be registered on init.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_fwlog_init(struct ixgbe_hw *hw, struct ixgbe_fwlog_cfg *cfg)
+{
+ if (!ixgbe_fwlog_valid_cfg(hw, cfg))
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fwlog_cache_cfg(hw, cfg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_aci_fwlog_set - Set FW logging configuration
+ * @hw: pointer to the HW structure
+ * @entries: entries to configure
+ * @num_entries: number of @entries
+ * @options: options from ixgbe_fwlog_cfg->options structure
+ * @log_resolution: logging resolution
+ *
+ * Set FW logging configuration using ACI command (0xFF30).
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_aci_fwlog_set(struct ixgbe_hw *hw,
+ struct ixgbe_fwlog_module_entry *entries,
+ u16 num_entries, u16 options, u16 log_resolution)
+{
+ struct ixgbe_aci_cmd_fw_log_cfg_resp fw_modules[IXGBE_ACI_FW_LOG_ID_MAX];
+ struct ixgbe_aci_cmd_fw_log *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+ u16 i;
+
+ if (num_entries > IXGBE_ACI_FW_LOG_ID_MAX)
+ return IXGBE_ERR_PARAM;
+
+ for (i = 0; i < num_entries; i++) {
+ fw_modules[i].module_identifier =
+ IXGBE_CPU_TO_LE16(entries[i].module_id);
+ fw_modules[i].log_level = entries[i].log_level;
+ }
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_fw_logs_config);
+ desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+
+ cmd = &desc.params.fw_log;
+
+ cmd->cmd_flags = IXGBE_ACI_FW_LOG_CONF_SET_VALID;
+ cmd->ops.cfg.log_resolution = IXGBE_CPU_TO_LE16(log_resolution);
+ cmd->ops.cfg.mdl_cnt = IXGBE_CPU_TO_LE16(num_entries);
+
+ if (options & IXGBE_FWLOG_OPTION_ARQ_ENA)
+ cmd->cmd_flags |= IXGBE_ACI_FW_LOG_CONF_AQ_EN;
+ if (options & IXGBE_FWLOG_OPTION_UART_ENA)
+ cmd->cmd_flags |= IXGBE_ACI_FW_LOG_CONF_UART_EN;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, fw_modules,
+ sizeof(*fw_modules) * num_entries);
+
+ return status;
+}
+
+/**
+ * ixgbe_fwlog_supported - Report whether FW supports FW logging (cached)
+ * @hw: pointer to the HW structure
+ *
+ * This will always return false if called before ixgbe_init_hw(), so it must be
+ * called after ixgbe_init_hw().
+ *
+ * Return: true if FW supports FW logging.
+ * If this function is called before ixgbe_init_hw(), return false.
+ */
+bool ixgbe_fwlog_supported(struct ixgbe_hw *hw)
+{
+ return hw->fwlog_support_ena;
+}
+
+/**
+ * ixgbe_fwlog_set - Set the firmware logging settings
+ * @hw: pointer to the HW structure
+ * @cfg: config used to set firmware logging
+ *
+ * Call this function whenever the driver needs to set the firmware
+ * logging configuration. It can be called on initialization, reset, or during
+ * runtime.
+ *
+ * If the PF wishes to receive FW logging then it must register via
+ * ixgbe_fwlog_register. Note, that ixgbe_fwlog_register does not need to
+ * be called for init.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_fwlog_set(struct ixgbe_hw *hw, struct ixgbe_fwlog_cfg *cfg)
+{
+ s32 status;
+
+ if (!ixgbe_fwlog_supported(hw))
+ return IXGBE_ERR_NOT_SUPPORTED;
+
+ if (!ixgbe_fwlog_valid_cfg(hw, cfg))
+ return IXGBE_ERR_PARAM;
+
+ status = ixgbe_aci_fwlog_set(hw, cfg->module_entries,
+ IXGBE_ACI_FW_LOG_ID_MAX, cfg->options,
+ cfg->log_resolution);
+ if (!status)
+ ixgbe_fwlog_cache_cfg(hw, cfg);
+
+ return status;
+}
+
+/**
+ * ixgbe_fwlog_update_cached_entries - Update module entries in cached
+ * FW logging config
+ * @hw: pointer to the HW structure
+ * @entries: entries to cache
+ * @num_entries: number of @entries
+ *
+ * Update module entries in cached FW logging config.
+ */
+static void ixgbe_fwlog_update_cached_entries(struct ixgbe_hw *hw,
+ struct ixgbe_fwlog_module_entry *entries,
+ u16 num_entries)
+{
+ u16 i;
+
+ for (i = 0; i < num_entries; i++) {
+ struct ixgbe_fwlog_module_entry *updated = &entries[i];
+ u16 j;
+
+ for (j = 0; j < IXGBE_ACI_FW_LOG_ID_MAX; j++) {
+ struct ixgbe_fwlog_module_entry *cached =
+ &hw->fwlog_cfg.module_entries[j];
+
+ if (cached->module_id == updated->module_id) {
+ cached->log_level = updated->log_level;
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * ixgbe_fwlog_update_modules - Update the log level of 1 or more
+ * FW logging modules
+ * @hw: pointer to the HW structure
+ * @entries: array of ixgbe_fwlog_module_entry(s)
+ * @num_entries: number of entries
+ *
+ * Update the log level of 1 or more FW logging modules via module ID.
+ *
+ * Only the entries passed in will be affected. All other firmware logging
+ * settings will be unaffected.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_fwlog_update_modules(struct ixgbe_hw *hw,
+ struct ixgbe_fwlog_module_entry *entries,
+ u16 num_entries)
+{
+ struct ixgbe_fwlog_cfg cfg;
+ s32 status;
+
+ if (!ixgbe_fwlog_supported(hw))
+ return IXGBE_ERR_NOT_SUPPORTED;
+
+ if (num_entries > IXGBE_ACI_FW_LOG_ID_MAX)
+ return IXGBE_ERR_PARAM;
+
+ if (!ixgbe_fwlog_valid_module_entries(hw, entries, num_entries))
+ return IXGBE_ERR_PARAM;
+
+ status = ixgbe_fwlog_get(hw, &cfg);
+ if (status)
+ goto status_out;
+
+ status = ixgbe_aci_fwlog_set(hw, entries, num_entries, cfg.options,
+ cfg.log_resolution);
+ if (!status)
+ ixgbe_fwlog_update_cached_entries(hw, entries, num_entries);
+
+status_out:
+ return status;
+}
+
+/**
+ * ixgbe_aci_fwlog_register - Register PF for firmware logging events.
+ * @hw: pointer to the HW structure
+ * @reg: true to register and false to unregister
+ *
+ * Register a PF for firmware logging events using ACI command (0xFF31).
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_aci_fwlog_register(struct ixgbe_hw *hw, bool reg)
+{
+ struct ixgbe_aci_desc desc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_fw_logs_register);
+
+ if (reg)
+ desc.params.fw_log.cmd_flags = IXGBE_ACI_FW_LOG_AQ_REGISTER;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_fwlog_register - Register the PF for firmware logging
+ * @hw: pointer to the HW structure
+ *
+ * After this call the PF will start to receive firmware logging based on the
+ * configuration set in ixgbe_fwlog_set.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_fwlog_register(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ if (!ixgbe_fwlog_supported(hw))
+ return IXGBE_ERR_NOT_SUPPORTED;
+
+ status = ixgbe_aci_fwlog_register(hw, true);
+
+ if (!status)
+ hw->fwlog_cfg.options |= IXGBE_FWLOG_OPTION_IS_REGISTERED;
+
+ return status;
+}
+
+/**
+ * ixgbe_fwlog_unregister - Unregister the PF from firmware logging
+ * @hw: pointer to the HW structure
+ *
+ * Make an attempt to unregister the PF from firmware logging.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_fwlog_unregister(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ if (!ixgbe_fwlog_supported(hw))
+ return IXGBE_ERR_NOT_SUPPORTED;
+
+ status = ixgbe_aci_fwlog_register(hw, false);
+ if (!status)
+ hw->fwlog_cfg.options &= ~IXGBE_FWLOG_OPTION_IS_REGISTERED;
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_fwlog_get - Get the current firmware logging configuration
+ * @hw: pointer to the HW structure
+ * @cfg: firmware logging configuration to populate
+ *
+ * Make an attempt to get the current firmware logging
+ * configuration using ACI command (0xFF32).
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_aci_fwlog_get(struct ixgbe_hw *hw, struct ixgbe_fwlog_cfg *cfg)
+{
+ struct ixgbe_aci_cmd_fw_log_cfg_resp *fw_modules;
+ struct ixgbe_aci_cmd_fw_log *cmd;
+ struct ixgbe_aci_desc desc;
+ u16 i, module_id_cnt;
+ u8 *buf = NULL;
+ s32 status;
+
+ memset(cfg, 0, sizeof(*cfg));
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_fw_logs_query);
+ cmd = &desc.params.fw_log;
+
+ cmd->cmd_flags = IXGBE_ACI_FW_LOG_AQ_QUERY;
+
+ buf = (u8 *)ixgbe_malloc(hw, IXGBE_ACI_MAX_BUFFER_SIZE);
+ if (!buf)
+ return IXGBE_ERR_OUT_OF_MEM;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, buf, IXGBE_ACI_MAX_BUFFER_SIZE);
+ if (status) {
+ goto status_out;
+ }
+
+ module_id_cnt = IXGBE_LE16_TO_CPU(cmd->ops.cfg.mdl_cnt);
+ if (module_id_cnt > IXGBE_ACI_FW_LOG_ID_MAX) {
+ module_id_cnt = IXGBE_ACI_FW_LOG_ID_MAX;
+ }
+
+ cfg->log_resolution = (u8)IXGBE_LE16_TO_CPU(cmd->ops.cfg.log_resolution);
+ if (cmd->cmd_flags & IXGBE_ACI_FW_LOG_CONF_AQ_EN)
+ cfg->options |= IXGBE_FWLOG_OPTION_ARQ_ENA;
+ if (cmd->cmd_flags & IXGBE_ACI_FW_LOG_CONF_UART_EN)
+ cfg->options |= IXGBE_FWLOG_OPTION_UART_ENA;
+ if (cmd->cmd_flags & IXGBE_ACI_FW_LOG_QUERY_REGISTERED)
+ cfg->options |= IXGBE_FWLOG_OPTION_IS_REGISTERED;
+
+ fw_modules = (struct ixgbe_aci_cmd_fw_log_cfg_resp *)buf;
+
+ for (i = 0; i < module_id_cnt; i++) {
+ struct ixgbe_aci_cmd_fw_log_cfg_resp *fw_module = &fw_modules[i];
+
+ cfg->module_entries[i].module_id =
+ IXGBE_LE16_TO_CPU(fw_module->module_identifier);
+ cfg->module_entries[i].log_level = fw_module->log_level;
+ }
+
+status_out:
+ if (buf)
+ ixgbe_free(hw, buf);
+ return status;
+}
+
+/**
+ * ixgbe_fwlog_set_support_ena - Set if FW logging is supported by FW
+ * @hw: pointer to the HW struct
+ *
+ * If FW returns success to the ixgbe_aci_fwlog_get call then it supports FW
+ * logging, else it doesn't. Set the fwlog_support_ena flag accordingly.
+ *
+ * This function is only meant to be called during driver init to determine if
+ * the FW supports FW logging.
+ *
+ * No return value; the result is stored in hw->fwlog_support_ena.
+ */
+void ixgbe_fwlog_set_support_ena(struct ixgbe_hw *hw)
+{
+ struct ixgbe_fwlog_cfg cfg;
+ s32 status;
+
+ hw->fwlog_support_ena = false;
+
+ /* don't call ixgbe_fwlog_get() because that would overwrite the cached
+ * configuration from the call to ixgbe_fwlog_init(), which is expected
+ * to be called prior to this function
+ */
+ status = ixgbe_aci_fwlog_get(hw, &cfg);
+ if (!status)
+ hw->fwlog_support_ena = true;
+}
+
+/**
+ * ixgbe_fwlog_get - Get the firmware logging settings
+ * @hw: pointer to the HW structure
+ * @cfg: config to populate based on current firmware logging settings
+ *
+ * Get the current firmware logging settings.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_fwlog_get(struct ixgbe_hw *hw, struct ixgbe_fwlog_cfg *cfg)
+{
+ s32 status;
+
+ if (!ixgbe_fwlog_supported(hw))
+ return IXGBE_ERR_NOT_SUPPORTED;
+
+ if (!cfg)
+ return IXGBE_ERR_PARAM;
+
+ status = ixgbe_aci_fwlog_get(hw, cfg);
+ if (status)
+ return status;
+
+ ixgbe_fwlog_cache_cfg(hw, cfg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_fwlog_event_dump - Dump the event received over the Admin Receive Queue
+ * @hw: pointer to the HW structure
+ * @desc: Admin Receive Queue descriptor
+ * @buf: buffer that contains the FW log event data
+ *
+ * If the driver receives the ixgbe_aci_opc_fw_logs_event on the Admin Receive
+ * Queue, then it should call this function to dump the FW log data.
+ */
+void ixgbe_fwlog_event_dump(struct ixgbe_hw *hw,
+ struct ixgbe_aci_desc *desc, void *buf)
+{
+ if (!ixgbe_fwlog_supported(hw))
+ return;
+
+ ixgbe_info_fwlog(hw, 32, 1, (u8 *)buf,
+ IXGBE_LE16_TO_CPU(desc->datalen));
+}
+
+/**
* ixgbe_aci_set_health_status_config - Configure FW health events
* @hw: pointer to the HW struct
* @event_source: type of diagnostic events to enable
@@ -3883,9 +4387,14 @@ s32 ixgbe_init_ops_E610(struct ixgbe_hw *hw)
/* PHY */
phy->ops.init = ixgbe_init_phy_ops_E610;
phy->ops.identify = ixgbe_identify_phy_E610;
- phy->eee_speeds_supported = IXGBE_LINK_SPEED_10_FULL |
- IXGBE_LINK_SPEED_100_FULL |
- IXGBE_LINK_SPEED_1GB_FULL;
+
+ if (hw->device_id == IXGBE_DEV_ID_E610_2_5G_T)
+ phy->eee_speeds_supported = IXGBE_LINK_SPEED_2_5GB_FULL;
+ else
+ phy->eee_speeds_supported = IXGBE_LINK_SPEED_2_5GB_FULL |
+ IXGBE_LINK_SPEED_5GB_FULL |
+ IXGBE_LINK_SPEED_10GB_FULL;
+
phy->eee_speeds_advertised = phy->eee_speeds_supported;
/* Additional ops overrides for e610 to go here */
@@ -4513,19 +5022,18 @@ s32 ixgbe_setup_eee_E610(struct ixgbe_hw *hw, bool enable_eee)
phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+ /* setup only speeds which are defined for [0x0601/0x0600].eee_cap */
if (enable_eee) {
- if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX)
+ if (hw->phy.eee_speeds_advertised & IXGBE_LINK_SPEED_100_FULL)
eee_cap |= IXGBE_ACI_PHY_EEE_EN_100BASE_TX;
- if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T)
+ if (hw->phy.eee_speeds_advertised & IXGBE_LINK_SPEED_1GB_FULL)
eee_cap |= IXGBE_ACI_PHY_EEE_EN_1000BASE_T;
- if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX)
- eee_cap |= IXGBE_ACI_PHY_EEE_EN_1000BASE_KX;
- if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T)
+ if (hw->phy.eee_speeds_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_2_5GBASE_T;
+ if (hw->phy.eee_speeds_advertised & IXGBE_LINK_SPEED_5GB_FULL)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_5GBASE_T;
+ if (hw->phy.eee_speeds_advertised & IXGBE_LINK_SPEED_10GB_FULL)
eee_cap |= IXGBE_ACI_PHY_EEE_EN_10GBASE_T;
- if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1)
- eee_cap |= IXGBE_ACI_PHY_EEE_EN_10GBASE_KR;
- if (phy_caps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T)
- eee_cap |= IXGBE_ACI_PHY_EEE_EN_10BASE_T;
}
/* Set EEE capability for particular PHY types */
diff --git a/sys/dev/ixgbe/ixgbe_e610.h b/sys/dev/ixgbe/ixgbe_e610.h
index 94e600139499..7af5506d85e8 100644
--- a/sys/dev/ixgbe/ixgbe_e610.h
+++ b/sys/dev/ixgbe/ixgbe_e610.h
@@ -169,6 +169,19 @@ s32 ixgbe_handle_nvm_access(struct ixgbe_hw *hw,
s32 ixgbe_aci_set_health_status_config(struct ixgbe_hw *hw, u8 event_source);
+s32 ixgbe_fwlog_init(struct ixgbe_hw *hw, struct ixgbe_fwlog_cfg *cfg);
+bool ixgbe_fwlog_supported(struct ixgbe_hw *hw);
+s32 ixgbe_fwlog_set(struct ixgbe_hw *hw, struct ixgbe_fwlog_cfg *cfg);
+s32 ixgbe_fwlog_update_modules(struct ixgbe_hw *hw,
+ struct ixgbe_fwlog_module_entry *entries,
+ u16 num_entries);
+s32 ixgbe_fwlog_register(struct ixgbe_hw *hw);
+s32 ixgbe_fwlog_unregister(struct ixgbe_hw *hw);
+void ixgbe_fwlog_set_support_ena(struct ixgbe_hw *hw);
+s32 ixgbe_fwlog_get(struct ixgbe_hw *hw, struct ixgbe_fwlog_cfg *cfg);
+void ixgbe_fwlog_event_dump(struct ixgbe_hw *hw,
+ struct ixgbe_aci_desc *desc, void *buf);
+
/* E610 operations */
s32 ixgbe_init_ops_E610(struct ixgbe_hw *hw);
s32 ixgbe_reset_hw_E610(struct ixgbe_hw *hw);
diff --git a/sys/dev/ixgbe/ixgbe_features.h b/sys/dev/ixgbe/ixgbe_features.h
index bee9040319d8..bbc7507b29ac 100644
--- a/sys/dev/ixgbe/ixgbe_features.h
+++ b/sys/dev/ixgbe/ixgbe_features.h
@@ -58,6 +58,7 @@
#define IXGBE_FEATURE_NEEDS_CTXD (u32)(1 << 13)
#define IXGBE_FEATURE_RECOVERY_MODE (u32)(1 << 15)
#define IXGBE_FEATURE_DBG_DUMP (u32)(1 << 16)
+#define IXGBE_FEATURE_FW_LOGGING (u32)(1 << 17)
/* Check for OS support. Undefine features if not included in the OS */
#ifndef PCI_IOV
diff --git a/sys/dev/ixgbe/ixgbe_fw_logging.c b/sys/dev/ixgbe/ixgbe_fw_logging.c
new file mode 100644
index 000000000000..6202d504423f
--- /dev/null
+++ b/sys/dev/ixgbe/ixgbe_fw_logging.c
@@ -0,0 +1,467 @@
+/**
+ * @file ixgbe_fw_logging.c
+ * @brief firmware logging sysctls
+ *
+ * Contains sysctls to enable and configure firmware logging debug support.
+ */
+
+ #include "ixgbe.h"
+
+ /**
+ * ixgbe_reconfig_fw_log - Re-program firmware logging configuration
+ * @sc: private softc structure
+ * @cfg: firmware log configuration to latch
+ *
+ * If the adminq is currently active, ask firmware to update the logging
+ * configuration. If the adminq is currently down, then do nothing. In this
+ * case, ixgbe_init_hw() will re-configure firmware logging as soon as it brings
+ * up the adminq.
+ */
+ static int
+ ixgbe_reconfig_fw_log(struct ixgbe_softc *sc, struct ixgbe_fwlog_cfg *cfg)
+ {
+ int status;
+
+ ixgbe_fwlog_init(&sc->hw, cfg);
+
+ if (!ixgbe_fwlog_supported(&sc->hw))
+ return (0);
+
+ status = ixgbe_fwlog_set(&sc->hw, cfg);
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT1("Failed to reconfigure firmware logging, status %d\n",
+ status);
+ return (ENODEV);
+ }
+
+ return (0);
+ }
+
+ /**
+ * ixgbe_sysctl_fwlog_set_cfg_options - Sysctl for setting fwlog cfg options
+ * @oidp: sysctl oid structure
+ * @arg1: private softc structure
+ * @arg2: option to adjust
+ * @req: sysctl request pointer
+ *
+ * On read: displays whether the given config option is currently enabled
+ * On write: enables/disables the option and re-applies the FW log config
+ *
+ * This has no effect on the legacy (V1) version of firmware logging.
+ */
+ static int
+ ixgbe_sysctl_fwlog_set_cfg_options(SYSCTL_HANDLER_ARGS)
+ {
+ struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
+ struct ixgbe_fwlog_cfg *cfg = &sc->hw.fwlog_cfg;
+ int error;
+ u16 option = (u16)arg2;
+ bool enabled;
+
+ enabled = !!(cfg->options & option);
+
+ error = sysctl_handle_bool(oidp, &enabled, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ if (enabled)
+ cfg->options |= option;
+ else
+ cfg->options &= ~option;
+
+ return ixgbe_reconfig_fw_log(sc, cfg);
+ }
+
+ /**
+ * ixgbe_sysctl_fwlog_log_resolution - Sysctl for setting log message resolution
+ * @oidp: sysctl oid structure
+ * @arg1: private softc structure
+ * @arg2: __unused__
+ * @req: sysctl request pointer
+ *
+ * On read: displays message queue limit before posting
+ * On write: sets message queue limit before posting
+ *
+ * This has no effect on the legacy (V1) version of firmware logging.
+ */
+ static int
+ ixgbe_sysctl_fwlog_log_resolution(SYSCTL_HANDLER_ARGS)
+ {
+ struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
+ struct ixgbe_fwlog_cfg *cfg = &sc->hw.fwlog_cfg;
+ int error;
+ u8 resolution;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ resolution = cfg->log_resolution;
+
+ error = sysctl_handle_8(oidp, &resolution, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ if ((resolution < IXGBE_ACI_FW_LOG_MIN_RESOLUTION) ||
+ (resolution > IXGBE_ACI_FW_LOG_MAX_RESOLUTION)) {
+ DEBUGOUT("Log resolution out-of-bounds\n");
+ return (EINVAL);
+ }
+
+ cfg->log_resolution = resolution;
+
+ return ixgbe_reconfig_fw_log(sc, cfg);
+ }
+
+ /**
+ * ixgbe_sysctl_fwlog_register - Sysctl for (de)registering firmware logs
+ * @oidp: sysctl oid structure
+ * @arg1: private softc structure
+ * @arg2: __unused__
+ * @req: sysctl request pointer
+ *
+ * On read: displays whether firmware logging is registered
+ * On write: (de)registers firmware logging.
+ */
+ static int
+ ixgbe_sysctl_fwlog_register(SYSCTL_HANDLER_ARGS)
+ {
+ struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
+ struct ixgbe_fwlog_cfg *cfg = &sc->hw.fwlog_cfg;
+ int status;
+ int error;
+ u8 enabled;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (cfg->options & IXGBE_FWLOG_OPTION_IS_REGISTERED)
+ enabled = true;
+ else
+ enabled = false;
+
+ error = sysctl_handle_bool(oidp, &enabled, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ if (enabled) {
+ status = ixgbe_fwlog_register(&sc->hw);
+ if (status == IXGBE_SUCCESS)
+ sc->feat_en |= IXGBE_FEATURE_FW_LOGGING;
+ } else {
+ status = ixgbe_fwlog_unregister(&sc->hw);
+ if (status == IXGBE_SUCCESS)
+ sc->feat_en &= ~IXGBE_FEATURE_FW_LOGGING;
+ }
+
+ if (status != IXGBE_SUCCESS)
+ return (EIO);
+
+ return (0);
+ }
+
+ /**
+ * _ixgbe_log_sev_str - Convert log level to a string
+ * @log_level: the log level to convert
+ *
+ * Convert the u8 log level of a FW logging module into a readable
+ * string for outputting in a sysctl.
+ */
+ struct ixgbe_str_buf {
+ char str[IXGBE_STR_BUF_LEN];
+ };
+
+ static struct ixgbe_str_buf
+ _ixgbe_log_sev_str(u8 log_level)
+ {
+ struct ixgbe_str_buf buf = { .str = "" };
+ const char *str = NULL;
+
+ switch (log_level) {
+ case IXGBE_FWLOG_LEVEL_NONE:
+ str = "none";
+ break;
+ case IXGBE_FWLOG_LEVEL_ERROR:
+ str = "error";
+ break;
+ case IXGBE_FWLOG_LEVEL_WARNING:
+ str = "warning";
+ break;
+ case IXGBE_FWLOG_LEVEL_NORMAL:
+ str = "normal";
+ break;
+ case IXGBE_FWLOG_LEVEL_VERBOSE:
+ str = "verbose";
+ break;
+ default:
+ break;
+ }
+
+ if (str)
+ snprintf(buf.str, IXGBE_STR_BUF_LEN, "%s", str);
+ else
+ snprintf(buf.str, IXGBE_STR_BUF_LEN, "%u", log_level);
+
+ return buf;
+ }
+
+ #define ixgbe_log_sev_str(log_level) _ixgbe_log_sev_str(log_level).str
+
+ /**
+ * ixgbe_sysctl_fwlog_module_log_severity - Sysctl to get/set a module's log severity
+ * @oidp: sysctl oid structure
+ * @arg1: private softc structure
+ * @arg2: index to logging module
+ * @req: sysctl request pointer
+ */
+ static int
+ ixgbe_sysctl_fwlog_module_log_severity(SYSCTL_HANDLER_ARGS)
+ {
+ struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
+ struct ixgbe_fwlog_cfg *cfg = &sc->hw.fwlog_cfg;
+ struct sbuf *sbuf;
+ char *sev_str_end;
+ enum ixgbe_aci_fw_logging_mod module = (enum ixgbe_aci_fw_logging_mod)arg2;
+ int error, ll_num;
+ u8 log_level;
+ char sev_str[16];
+ bool sev_set = false;
+
+ log_level = cfg->module_entries[module].log_level;
+ sbuf = sbuf_new(NULL, sev_str, sizeof(sev_str), SBUF_FIXEDLEN);
+ sbuf_printf(sbuf, "%d<%s>", log_level, ixgbe_log_sev_str(log_level));
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+ error = sysctl_handle_string(oidp, sev_str, sizeof(sev_str), req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ if (strcasecmp(ixgbe_log_sev_str(IXGBE_FWLOG_LEVEL_VERBOSE), sev_str) == 0) {
+ log_level = IXGBE_FWLOG_LEVEL_VERBOSE;
+ sev_set = true;
+ } else if (strcasecmp(ixgbe_log_sev_str(IXGBE_FWLOG_LEVEL_NORMAL), sev_str) == 0) {
+ log_level = IXGBE_FWLOG_LEVEL_NORMAL;
+ sev_set = true;
+ } else if (strcasecmp(ixgbe_log_sev_str(IXGBE_FWLOG_LEVEL_WARNING), sev_str) == 0) {
+ log_level = IXGBE_FWLOG_LEVEL_WARNING;
+ sev_set = true;
+ } else if (strcasecmp(ixgbe_log_sev_str(IXGBE_FWLOG_LEVEL_ERROR), sev_str) == 0) {
+ log_level = IXGBE_FWLOG_LEVEL_ERROR;
+ sev_set = true;
+ } else if (strcasecmp(ixgbe_log_sev_str(IXGBE_FWLOG_LEVEL_NONE), sev_str) == 0) {
+ log_level = IXGBE_FWLOG_LEVEL_NONE;
+ sev_set = true;
+ }
+
+ if (!sev_set) {
+ ll_num = strtol(sev_str, &sev_str_end, 0);
+ if (sev_str_end == sev_str)
+ ll_num = -1;
+ if ((ll_num >= IXGBE_FWLOG_LEVEL_NONE) &&
+ (ll_num < IXGBE_FWLOG_LEVEL_INVALID))
+ log_level = ll_num;
+ else {
+ DEBUGOUT2("%s: \"%s\" is not a valid log level\n",
+ __func__, sev_str);
+ return (EINVAL);
+ }
+ }
+
+ cfg->module_entries[module].log_level = log_level;
+
+ return ixgbe_reconfig_fw_log(sc, cfg);
+ }
+
+ #define IXGBE_SYSCTL_HELP_FWLOG_LOG_RESOLUTION \
+ "\nControl firmware message limit to send per ARQ event" \
+ "\t\nMin: 1" \
+ "\t\nMax: 128"
+
+ #define IXGBE_SYSCTL_HELP_FWLOG_ARQ_ENA \
+ "\nControl whether to enable/disable reporting to admin Rx queue" \
+ "\n1 - Enable firmware reporting via ARQ" \
+ "\n0 - Disable firmware reporting via ARQ"
+
+ #define IXGBE_SYSCTL_HELP_FWLOG_UART_ENA \
+ "\nControl whether to enable/disable reporting to UART" \
+ "\n1 - Enable firmware reporting via UART" \
+ "\n0 - Disable firmware reporting via UART"
+
+ #define IXGBE_SYSCTL_HELP_FWLOG_ENABLE_ON_LOAD \
+ "\nControl whether to enable logging during the attach phase" \
+ "\n1 - Enable firmware logging during attach phase" \
+ "\n0 - Disable firmware logging during attach phase"
+
+ #define IXGBE_SYSCTL_HELP_FWLOG_REGISTER \
+ "\nControl whether to enable/disable firmware logging" \
+ "\n1 - Enable firmware logging" \
+ "\n0 - Disable firmware logging"
+
+ #define IXGBE_SYSCTL_HELP_FWLOG_MODULE_SEVERITY \
+ "\nControl the level of log output messages for this module" \
+ "\n\tverbose <4> - Verbose messages + (Error|Warning|Normal)" \
+ "\n\tnormal <3> - Normal messages + (Error|Warning)" \
+ "\n\twarning <2> - Warning messages + (Error)" \
+ "\n\terror <1> - Error messages" \
+ "\n\tnone <0> - Disables all logging for this module"
+
+ /**
+ * ixgbe_fw_module_str - Convert a FW logging module to a string name
+ * @module: the module to convert
+ *
+ * Given a FW logging module id, convert it to a shorthand human-readable
+ * name, for generating sysctl tunables.
+ *
+ * Returns a pointer to a static string; never NULL. "unknown" is returned
+ * for IXGBE_ACI_FW_LOG_ID_MAX and any out-of-range value.
+ */
+ static const char *
+ ixgbe_fw_module_str(enum ixgbe_aci_fw_logging_mod module)
+ {
+ switch (module) {
+ case IXGBE_ACI_FW_LOG_ID_GENERAL:
+ return "general";
+ case IXGBE_ACI_FW_LOG_ID_CTRL:
+ return "ctrl";
+ case IXGBE_ACI_FW_LOG_ID_LINK:
+ return "link";
+ case IXGBE_ACI_FW_LOG_ID_LINK_TOPO:
+ return "link_topo";
+ case IXGBE_ACI_FW_LOG_ID_DNL:
+ return "dnl";
+ case IXGBE_ACI_FW_LOG_ID_I2C:
+ return "i2c";
+ case IXGBE_ACI_FW_LOG_ID_SDP:
+ return "sdp";
+ case IXGBE_ACI_FW_LOG_ID_MDIO:
+ return "mdio";
+ case IXGBE_ACI_FW_LOG_ID_ADMINQ:
+ return "adminq";
+ case IXGBE_ACI_FW_LOG_ID_HDMA:
+ return "hdma";
+ case IXGBE_ACI_FW_LOG_ID_LLDP:
+ return "lldp";
+ case IXGBE_ACI_FW_LOG_ID_DCBX:
+ return "dcbx";
+ case IXGBE_ACI_FW_LOG_ID_DCB:
+ return "dcb";
+ case IXGBE_ACI_FW_LOG_ID_XLR:
+ return "xlr";
+ case IXGBE_ACI_FW_LOG_ID_NVM:
+ return "nvm";
+ case IXGBE_ACI_FW_LOG_ID_AUTH:
+ return "auth";
+ case IXGBE_ACI_FW_LOG_ID_VPD:
+ return "vpd";
+ case IXGBE_ACI_FW_LOG_ID_IOSF:
+ return "iosf";
+ case IXGBE_ACI_FW_LOG_ID_PARSER:
+ return "parser";
+ case IXGBE_ACI_FW_LOG_ID_SW:
+ return "sw";
+ case IXGBE_ACI_FW_LOG_ID_SCHEDULER:
+ return "scheduler";
+ case IXGBE_ACI_FW_LOG_ID_TXQ:
+ return "txq";
+ case IXGBE_ACI_FW_LOG_ID_ACL:
+ return "acl";
+ case IXGBE_ACI_FW_LOG_ID_POST:
+ return "post";
+ case IXGBE_ACI_FW_LOG_ID_WATCHDOG:
+ return "watchdog";
+ case IXGBE_ACI_FW_LOG_ID_TASK_DISPATCH:
+ return "task_dispatch";
+ case IXGBE_ACI_FW_LOG_ID_MNG:
+ return "mng";
+ case IXGBE_ACI_FW_LOG_ID_SYNCE:
+ return "synce";
+ case IXGBE_ACI_FW_LOG_ID_HEALTH:
+ return "health";
+ case IXGBE_ACI_FW_LOG_ID_TSDRV:
+ return "tsdrv";
+ case IXGBE_ACI_FW_LOG_ID_PFREG:
+ return "pfreg";
+ case IXGBE_ACI_FW_LOG_ID_MDLVER:
+ return "mdlver";
+ case IXGBE_ACI_FW_LOG_ID_MAX:
+ return "unknown";
+ }
+
+ /* The compiler generates errors on unhandled enum values if we omit
+ * the default case.
+ */
+ return "unknown";
+ }
+
+ /**
+ * ixgbe_add_fw_logging_tunables - Add tunables to configure FW logging events
+ * @sc: private softc structure
+ * @parent: parent node to add the tunables under
+ *
+ * Add tunables for configuring the firmware logging support. This includes
+ * a control to enable the logging, and controls for each module to configure
+ * which events to receive.
+ *
+ * All controls are created as CTLFLAG_RWTUN so they can be set both as
+ * loader tunables and at runtime via sysctl.
+ */
+ void
+ ixgbe_add_fw_logging_tunables(struct ixgbe_softc *sc, struct sysctl_oid *parent)
+ {
+ struct sysctl_oid_list *parent_list, *fwlog_list, *module_list;
+ struct sysctl_oid *fwlog_node, *module_node;
+ struct sysctl_ctx_list *ctx;
+ struct ixgbe_hw *hw = &sc->hw;
+ struct ixgbe_fwlog_cfg *cfg;
+ device_t dev = sc->dev;
+ enum ixgbe_aci_fw_logging_mod module;
+ u16 i;
+
+ cfg = &hw->fwlog_cfg;
+ ctx = device_get_sysctl_ctx(dev);
+ parent_list = SYSCTL_CHILDREN(parent);
+
+ fwlog_node = SYSCTL_ADD_NODE(ctx, parent_list, OID_AUTO, "fw_log",
+ CTLFLAG_RD, NULL,
+ "Firmware Logging");
+ fwlog_list = SYSCTL_CHILDREN(fwlog_node);
+
+ /*
+ * Default log resolution of 10; presumably the number of FW log
+ * messages buffered before an event is emitted — TODO confirm
+ * against the fwlog handler.
+ */
+ cfg->log_resolution = 10;
+ SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "log_resolution",
+ CTLTYPE_U8 | CTLFLAG_RWTUN, sc,
+ 0, ixgbe_sysctl_fwlog_log_resolution,
+ "CU", IXGBE_SYSCTL_HELP_FWLOG_LOG_RESOLUTION);
+
+ /* Reporting over the admin Rx queue is enabled by default. */
+ cfg->options |= IXGBE_FWLOG_OPTION_ARQ_ENA;
+ SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "arq_en",
+ CTLTYPE_U8 | CTLFLAG_RWTUN, sc,
+ IXGBE_FWLOG_OPTION_ARQ_ENA, ixgbe_sysctl_fwlog_set_cfg_options,
+ "CU", IXGBE_SYSCTL_HELP_FWLOG_ARQ_ENA);
+
+ SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "uart_en",
+ CTLTYPE_U8 | CTLFLAG_RWTUN, sc,
+ IXGBE_FWLOG_OPTION_UART_ENA, ixgbe_sysctl_fwlog_set_cfg_options,
+ "CU", IXGBE_SYSCTL_HELP_FWLOG_UART_ENA);
+
+ SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "on_load",
+ CTLTYPE_U8 | CTLFLAG_RWTUN, sc,
+ IXGBE_FWLOG_OPTION_REGISTER_ON_INIT, ixgbe_sysctl_fwlog_set_cfg_options,
+ "CU", IXGBE_SYSCTL_HELP_FWLOG_ENABLE_ON_LOAD);
+
+ SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "register",
+ CTLTYPE_U8 | CTLFLAG_RWTUN, sc,
+ 0, ixgbe_sysctl_fwlog_register,
+ "CU", IXGBE_SYSCTL_HELP_FWLOG_REGISTER);
+
+ module_node = SYSCTL_ADD_NODE(ctx, fwlog_list, OID_AUTO, "severity",
+ CTLFLAG_RD, NULL,
+ "Level of log output");
+
+ module_list = SYSCTL_CHILDREN(module_node);
+
+ /* One severity tunable per FW module; all default to "none" (off). */
+ for (i = 0; i < IXGBE_ACI_FW_LOG_ID_MAX; i++) {
+ /* Setup some defaults */
+ cfg->module_entries[i].module_id = i;
+ cfg->module_entries[i].log_level = IXGBE_FWLOG_LEVEL_NONE;
+ module = (enum ixgbe_aci_fw_logging_mod)i;
+
+ SYSCTL_ADD_PROC(ctx, module_list,
+ OID_AUTO, ixgbe_fw_module_str(module),
+ CTLTYPE_STRING | CTLFLAG_RWTUN, sc,
+ module, ixgbe_sysctl_fwlog_module_log_severity,
+ "A", IXGBE_SYSCTL_HELP_FWLOG_MODULE_SEVERITY);
+ }
+ }
+ \ No newline at end of file
diff --git a/sys/dev/ixgbe/ixgbe_osdep.c b/sys/dev/ixgbe/ixgbe_osdep.c
index 9bd9ce63b786..d96e15f4f87f 100644
--- a/sys/dev/ixgbe/ixgbe_osdep.c
+++ b/sys/dev/ixgbe/ixgbe_osdep.c
@@ -140,3 +140,39 @@ ixgbe_destroy_lock(struct ixgbe_lock *lock)
if (mtx_initialized(&lock->mutex))
mtx_destroy(&lock->mutex);
}
+
+/**
+ * ixgbe_info_fwlog - Format and print an array of values to the console
+ * @hw: private hardware structure
+ * @rowsize: preferred number of bytes to print per row (clamped to 255)
+ * @groupsize: preferred size in bytes to print each chunk
+ * @buf: the array buffer to print
+ * @len: size of the array buffer
+ *
+ * Format the given array as a series of uint8_t values with hexadecimal
+ * notation and log the contents to the console log. This variation is
+ * specific to firmware logging.
+ *
+ * Silently returns without printing when firmware logging is not
+ * supported on this hardware.
+ *
+ * TODO: Currently only supports a group size of 1, due to the way hexdump is
+ * implemented.
+ */
+void
+ixgbe_info_fwlog(struct ixgbe_hw *hw, uint32_t rowsize, uint32_t __unused groupsize,
+ uint8_t *buf, size_t len)
+{
+ device_t dev = ((struct ixgbe_softc *)hw->back)->dev;
+ char prettyname[20];
+
+ if (!ixgbe_fwlog_supported(hw))
+ return;
+
+ /* Format the device header to a string */
+ snprintf(prettyname, sizeof(prettyname), "%s: FWLOG: ",
+ device_get_nameunit(dev));
+
+ /*
+ * Make sure the row-size isn't too large: the row width shares the
+ * low byte of hexdump(9)'s flags argument with HD_* option bits in
+ * the upper bits — NOTE(review): confirm against hexdump(9).
+ */
+ if (rowsize > 0xFF)
+ rowsize = 0xFF;
+
+ hexdump(buf, len, prettyname, HD_OMIT_CHARS | rowsize);
+}
diff --git a/sys/dev/ixgbe/ixgbe_osdep.h b/sys/dev/ixgbe/ixgbe_osdep.h
index 8cf1d13736ce..ec2b3274ec31 100644
--- a/sys/dev/ixgbe/ixgbe_osdep.h
+++ b/sys/dev/ixgbe/ixgbe_osdep.h
@@ -253,4 +253,7 @@ ixgbe_free(struct ixgbe_hw __unused *hw, void *addr)
free(addr, M_DEVBUF);
}
+void ixgbe_info_fwlog(struct ixgbe_hw *hw, uint32_t rowsize,
+ uint32_t groupsize, uint8_t *buf, size_t len);
+
#endif /* _IXGBE_OSDEP_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_sriov.h b/sys/dev/ixgbe/ixgbe_sriov.h
index e5a78a7220cc..3c456ee819f2 100644
--- a/sys/dev/ixgbe/ixgbe_sriov.h
+++ b/sys/dev/ixgbe/ixgbe_sriov.h
@@ -94,7 +94,7 @@ u32 ixgbe_get_mrqc(int);
#define ixgbe_align_all_queue_indices(_a)
#define ixgbe_vf_que_index(_a, _b, _c) (_c)
#define ixgbe_get_mtqc(_a) IXGBE_MTQC_64Q_1PB
-#define ixgbe_get_mrqc(_a) 0
+#define ixgbe_get_mrqc(_a) IXGBE_MRQC_RSSEN
#endif /* PCI_IOV */
diff --git a/sys/dev/ixgbe/ixgbe_type_e610.h b/sys/dev/ixgbe/ixgbe_type_e610.h
index e300030c3ba4..da46e503f660 100644
--- a/sys/dev/ixgbe/ixgbe_type_e610.h
+++ b/sys/dev/ixgbe/ixgbe_type_e610.h
@@ -721,6 +721,7 @@ struct ixgbe_aci_cmd_list_caps_elem {
#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3 0x0084
#define IXGBE_ACI_CAPS_OROM_RECOVERY_UPDATE 0x0090
#define IXGBE_ACI_CAPS_NEXT_CLUSTER_ID 0x0096
+#define IXGBE_ACI_CAPS_EEE 0x009B
u8 major_ver;
u8 minor_ver;
/* Number of resources described by this capability */
@@ -836,10 +837,8 @@ struct ixgbe_aci_cmd_get_phy_caps_data {
#define IXGBE_ACI_PHY_EEE_EN_100BASE_TX BIT(0)
#define IXGBE_ACI_PHY_EEE_EN_1000BASE_T BIT(1)
#define IXGBE_ACI_PHY_EEE_EN_10GBASE_T BIT(2)
-#define IXGBE_ACI_PHY_EEE_EN_1000BASE_KX BIT(3)
-#define IXGBE_ACI_PHY_EEE_EN_10GBASE_KR BIT(4)
-#define IXGBE_ACI_PHY_EEE_EN_25GBASE_KR BIT(5)
-#define IXGBE_ACI_PHY_EEE_EN_10BASE_T BIT(11)
+#define IXGBE_ACI_PHY_EEE_EN_5GBASE_T BIT(11)
+#define IXGBE_ACI_PHY_EEE_EN_2_5GBASE_T BIT(12)
__le16 eeer_value;
u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */
u8 phy_fw_ver[8];
@@ -869,7 +868,9 @@ struct ixgbe_aci_cmd_get_phy_caps_data {
#define IXGBE_ACI_MOD_TYPE_BYTE2_SFP_PLUS 0xA0
#define IXGBE_ACI_MOD_TYPE_BYTE2_QSFP_PLUS 0x86
u8 qualified_module_count;
- u8 rsvd2[7]; /* Bytes 47:41 reserved */
+ u8 rsvd2;
+ __le16 eee_entry_delay;
+ u8 rsvd3[4];
#define IXGBE_ACI_QUAL_MOD_COUNT_MAX 16
struct {
u8 v_oui[3];
@@ -893,11 +894,38 @@ struct ixgbe_aci_cmd_set_phy_cfg {
IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_phy_cfg);
+/* Set PHY config obsolete command data structure (<FW 1.40) */
+struct ixgbe_aci_cmd_set_phy_cfg_data_pre_1_40 {
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 caps;
+ u8 low_power_ctrl_an;
+ __le16 eee_cap; /* Value from ixgbe_aci_get_phy_caps */
+ __le16 eeer_value; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 link_fec_opt; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 module_compliance_enforcement;
+};
+
+IXGBE_CHECK_STRUCT_LEN(24, ixgbe_aci_cmd_set_phy_cfg_data_pre_1_40);
+
+#pragma pack(1)
/* Set PHY config command data structure */
struct ixgbe_aci_cmd_set_phy_cfg_data {
__le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
__le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
u8 caps;
+ u8 low_power_ctrl_an;
+ __le16 eee_cap; /* Value from ixgbe_aci_get_phy_caps */
+ __le16 eeer_value; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 link_fec_opt; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 module_compliance_enforcement;
+ __le16 eee_entry_delay;
+};
+
+IXGBE_CHECK_STRUCT_LEN(26, ixgbe_aci_cmd_set_phy_cfg_data);
+#pragma pack()
+
+/* Set PHY config capabilities (@caps) defines */
#define IXGBE_ACI_PHY_ENA_VALID_MASK MAKEMASK(0xef, 0)
#define IXGBE_ACI_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
#define IXGBE_ACI_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
@@ -906,14 +934,7 @@ struct ixgbe_aci_cmd_set_phy_cfg_data {
#define IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT BIT(5)
#define IXGBE_ACI_PHY_ENA_LESM BIT(6)
#define IXGBE_ACI_PHY_ENA_AUTO_FEC BIT(7)
- u8 low_power_ctrl_an;
- __le16 eee_cap; /* Value from ixgbe_aci_get_phy_caps */
- __le16 eeer_value; /* Use defines from ixgbe_aci_get_phy_caps */
- u8 link_fec_opt; /* Use defines from ixgbe_aci_get_phy_caps */
- u8 module_compliance_enforcement;
-};
-IXGBE_CHECK_STRUCT_LEN(24, ixgbe_aci_cmd_set_phy_cfg_data);
/* Restart AN command data structure (direct 0x0605)
* Also used for response, with only the lport_num field present.
@@ -1035,8 +1056,9 @@ struct ixgbe_aci_cmd_get_link_status_data {
#define IXGBE_ACI_LINK_SPEED_200GB BIT(11)
#define IXGBE_ACI_LINK_SPEED_UNKNOWN BIT(15)
__le16 reserved3; /* Aligns next field to 8-byte boundary */
- u8 ext_fec_status;
-#define IXGBE_ACI_LINK_RS_272_FEC_EN BIT(0) /* RS 272 FEC enabled */
+ u8 eee_status;
+#define IXGBE_ACI_LINK_EEE_ENABLED BIT(2)
+#define IXGBE_ACI_LINK_EEE_ACTIVE BIT(3)
u8 reserved4;
__le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
__le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
@@ -2034,6 +2056,7 @@ struct ixgbe_link_status {
* ixgbe_aci_get_phy_caps structure
*/
u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+ u8 eee_status;
};
/* Common HW capabilities for SW use */
@@ -2112,6 +2135,12 @@ struct ixgbe_hw_common_caps {
u8 apm_wol_support;
u8 acpi_prog_mthd;
u8 proxy_support;
+ u8 eee_support;
+#define IXGBE_EEE_SUPPORT_100BASE_TX BIT(0)
+#define IXGBE_EEE_SUPPORT_1000BASE_T BIT(1)
+#define IXGBE_EEE_SUPPORT_10GBASE_T BIT(2)
+#define IXGBE_EEE_SUPPORT_5GBASE_T BIT(3)
+#define IXGBE_EEE_SUPPORT_2_5GBASE_T BIT(4)
bool sec_rev_disabled;
bool update_disabled;
bool nvm_unified_update;
diff --git a/sys/dev/ixgbe/ixgbe_x540.c b/sys/dev/ixgbe/ixgbe_x540.c
index 57cec5b52e18..561fe6f0f78a 100644
--- a/sys/dev/ixgbe/ixgbe_x540.c
+++ b/sys/dev/ixgbe/ixgbe_x540.c
@@ -878,7 +878,21 @@ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
- msec_delay(2);
+
+ /*
+ * EEPROM / flash access requires a 2ms sleep or interacting with
+ * them isn't stable. However, a 2ms delay for all sync operations
+ * is very expensive for MDIO access.
+ *
+ * So use a 10us delay for PHY0/PHY1 MDIO and management access and
+ * 2ms for everything else. This keeps MDIO access (e.g. from a switch
+ * driver) fast.
+ */
+ if (mask &
+ (IXGBE_GSSR_PHY0_SM | IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_SW_MNG_SM))
+ usec_delay(10);
+ else
+ usec_delay(2000);
}
/**
diff --git a/sys/dev/ixl/ixl_txrx.c b/sys/dev/ixl/ixl_txrx.c
index 04b8279bdc59..bca5abd370f5 100644
--- a/sys/dev/ixl/ixl_txrx.c
+++ b/sys/dev/ixl/ixl_txrx.c
@@ -33,7 +33,7 @@
/*
** IXL driver TX/RX Routines:
-** This was seperated to allow usage by
+** This was separated to allow usage by
** both the PF and VF drivers.
*/
diff --git a/sys/dev/mlx5/driver.h b/sys/dev/mlx5/driver.h
index cdefe7e013f6..ba6714c5c7b6 100644
--- a/sys/dev/mlx5/driver.h
+++ b/sys/dev/mlx5/driver.h
@@ -890,6 +890,7 @@ struct mlx5_cmd_work_ent {
u16 op;
u8 busy;
bool polling;
+ struct work_struct freew;
};
struct mlx5_pas {
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_cmd.c b/sys/dev/mlx5/mlx5_core/mlx5_cmd.c
index 86c721a83cb7..e314a04c294f 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_cmd.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_cmd.c
@@ -802,6 +802,15 @@ static void cb_timeout_handler(struct work_struct *work)
mlx5_cmd_comp_handler(dev, 1UL << ent->idx, MLX5_CMD_MODE_EVENTS);
}
+/*
+ * Deferred release of a command work entry. Queued (via ent->freew) from
+ * the command completion path so the entry is freed from workqueue
+ * context instead of directly in the completion handler — presumably to
+ * avoid freeing while the completion path still runs; TODO confirm.
+ */
+static void
+cmd_free_work(struct work_struct *work)
+{
+ struct mlx5_cmd_work_ent *ent = container_of(work,
+ struct mlx5_cmd_work_ent, freew);
+
+ free_cmd(ent);
+}
+
static void complete_command(struct mlx5_cmd_work_ent *ent)
{
struct mlx5_cmd *cmd = ent->cmd;
@@ -856,7 +865,8 @@ static void complete_command(struct mlx5_cmd_work_ent *ent)
free_msg(dev, ent->in);
err = err ? err : ent->status;
- free_cmd(ent);
+ INIT_WORK(&ent->freew, cmd_free_work);
+ schedule_work(&ent->freew);
callback(err, context);
} else {
complete(&ent->done);
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
index 0baeab31b100..9bcb0dcf8e16 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
@@ -1135,6 +1135,25 @@ mlx5e_hw_clock(struct mlx5e_priv *priv)
}
/*
+ * Seed the first calibration point so that base_prev and clbr_hw_prev
+ * are always valid. Called once during attach before the first
+ * calibration callout fires.
+ */
+static void
+mlx5e_seed_calibration(struct mlx5e_priv *priv)
+{
+ struct mlx5e_clbr_point *cp;
+ struct timespec ts;
+
+ cp = &priv->clbr_points[0];
+ cp->clbr_hw_curr = mlx5e_hw_clock(priv);
+ nanouptime(&ts);
+ cp->base_curr = mlx5e_timespec2usec(&ts);
+ /*
+ * Offset the "prev" samples by one so the hw/base deltas are never
+ * zero; mlx5e_calibration_callout() divides by those deltas.
+ */
+ cp->clbr_hw_prev = cp->clbr_hw_curr - 1;
+ cp->base_prev = cp->base_curr - 1;
+}
+
+/*
* The calibration callout, it runs either in the context of the
* thread which enables calibration, or in callout. It takes the
* snapshot of system and adapter clocks, then advances the pointers to
@@ -1147,6 +1166,9 @@ mlx5e_calibration_callout(void *arg)
struct mlx5e_priv *priv;
struct mlx5e_clbr_point *next, *curr;
struct timespec ts;
+ uint64_t hw_delta_new, hw_delta_old;
+ uint64_t old_nsec, old_projected, old_sec;
+ uint64_t res_n, res_s, res_s_mod, rt_delta_old;
int clbr_curr_next;
priv = arg;
@@ -1175,6 +1197,33 @@ mlx5e_calibration_callout(void *arg)
nanouptime(&ts);
next->base_curr = mlx5e_timespec2usec(&ts);
+ /*
+ * Ensure monotonicity across calibration transitions. Compute
+ * what the old calibration would extrapolate to at the new
+ * hw_curr. If the new base_curr is less, clamp it so the new
+ * slope is at least as steep as the old one. This prevents
+ * packets from seeing time go backwards when the slope drops.
+ *
+ * Use the same split-seconds technique as mlx5e_mbuf_tstmp()
+ * to avoid overflowing uint64_t in the multiplication.
+ */
+ hw_delta_new = next->clbr_hw_curr - curr->clbr_hw_curr;
+ rt_delta_old = curr->base_curr - curr->base_prev;
+ hw_delta_old = curr->clbr_hw_curr - curr->clbr_hw_prev;
+ old_sec = hw_delta_new / priv->cclk;
+ old_nsec = hw_delta_new % priv->cclk;
+ res_s = old_sec * rt_delta_old;
+ res_n = old_nsec * rt_delta_old;
+ res_s_mod = res_s % hw_delta_old;
+ res_s /= hw_delta_old;
+ res_s_mod *= priv->cclk;
+ res_n += res_s_mod;
+ res_n /= hw_delta_old;
+ res_s *= priv->cclk;
+ old_projected = curr->base_curr + res_s + res_n;
+ if (next->base_curr < old_projected)
+ next->base_curr = old_projected;
+
curr->clbr_gen = 0;
atomic_thread_fence_rel();
priv->clbr_curr = clbr_curr_next;
@@ -3728,6 +3777,8 @@ out:
break;
case SIOCGI2C:
+ /* fallthru */
+ case SIOCGI2CPB:
ifr = (struct ifreq *)data;
/*
@@ -3737,6 +3788,9 @@ out:
error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
if (error)
break;
+ /* ensure page and bank are 0 for legacy SIOCGI2C ioctls */
+ if (command == SIOCGI2C)
+ i2c.page = i2c.bank = 0;
if (i2c.len > sizeof(i2c.data)) {
error = EINVAL;
@@ -3778,8 +3832,17 @@ out:
error = EINVAL;
goto err_i2c;
}
+
+ if (i2c.bank != 0) {
+ mlx5_en_err(ifp,
+ "Query eeprom failed, Invalid Bank: %X\n",
+ i2c.bank);
+ error = EINVAL;
+ goto err_i2c;
+ }
+
error = mlx5_query_eeprom(priv->mdev,
- read_addr, MLX5_EEPROM_LOW_PAGE,
+ read_addr, i2c.page,
(uint32_t)i2c.offset, (uint32_t)i2c.len, module_num,
(uint32_t *)i2c.data, &size_read);
if (error) {
@@ -3791,7 +3854,7 @@ out:
if (i2c.len > MLX5_EEPROM_MAX_BYTES) {
error = mlx5_query_eeprom(priv->mdev,
- read_addr, MLX5_EEPROM_LOW_PAGE,
+ read_addr, i2c.page,
(uint32_t)(i2c.offset + size_read),
(uint32_t)(i2c.len - size_read), module_num,
(uint32_t *)(i2c.data + size_read), &size_read);
@@ -4873,6 +4936,7 @@ mlx5e_create_ifp(struct mlx5_core_dev *mdev)
callout_init(&priv->tstmp_clbr, 1);
/* Pull out the frequency of the clock in hz */
priv->cclk = (uint64_t)MLX5_CAP_GEN(mdev, device_frequency_khz) * 1000ULL;
+ mlx5e_seed_calibration(priv);
mlx5e_reset_calibration_callout(priv);
pa.pa_version = PFIL_VERSION;
diff --git a/sys/dev/nvme/nvme_ahci.c b/sys/dev/nvme/nvme_ahci.c
index c50971f83975..8be3887d835e 100644
--- a/sys/dev/nvme/nvme_ahci.c
+++ b/sys/dev/nvme/nvme_ahci.c
@@ -77,9 +77,6 @@ nvme_ahci_attach(device_t dev)
ret = ENOMEM;
goto bad;
}
- ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
- ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
- ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;
/* Allocate and setup IRQ */
ctrlr->rid = 0;
diff --git a/sys/dev/nvme/nvme_ctrlr.c b/sys/dev/nvme/nvme_ctrlr.c
index b75033300061..753a8b380a75 100644
--- a/sys/dev/nvme/nvme_ctrlr.c
+++ b/sys/dev/nvme/nvme_ctrlr.c
@@ -1346,29 +1346,51 @@ nvme_ctrlr_shared_handler(void *arg)
#define NVME_MAX_PAGES (int)(1024 / sizeof(vm_page_t))
+/*
+ * Number of physical pages spanned by the byte range
+ * [start, start + len).
+ */
static int
+nvme_page_count(vm_offset_t start, size_t len)
+{
+ return atop(round_page(start + len) - trunc_page(start));
+}
+
+static int
nvme_user_ioctl_req(vm_offset_t addr, size_t len, bool is_read,
- vm_page_t *upages, int max_pages, int *npagesp, struct nvme_request **req,
+ vm_page_t **upages, int max_pages, int *npagesp, struct nvme_request **req,
nvme_cb_fn_t cb_fn, void *cb_arg)
{
vm_prot_t prot = VM_PROT_READ;
- int err;
+ int err, npages;
+ vm_page_t *upages_us;
+
+ upages_us = *upages;
+ npages = nvme_page_count(addr, len);
+ if (npages > atop(maxphys))
+ return (EINVAL);
+ if (npages > max_pages)
+ upages_us = malloc(npages * sizeof(vm_page_t), M_NVME,
+ M_ZERO | M_WAITOK);
if (is_read)
prot |= VM_PROT_WRITE; /* Device will write to host memory */
err = vm_fault_hold_pages(&curproc->p_vmspace->vm_map,
- addr, len, prot, upages, max_pages, npagesp);
- if (err != 0)
+ addr, len, prot, upages_us, npages, npagesp);
+ if (err != 0) {
+ if (*upages != upages_us)
+ free(upages_us, M_NVME);
return (err);
+ }
*req = nvme_allocate_request_null(M_WAITOK, cb_fn, cb_arg);
- (*req)->payload = memdesc_vmpages(upages, len, addr & PAGE_MASK);
+ (*req)->payload = memdesc_vmpages(upages_us, len, addr & PAGE_MASK);
(*req)->payload_valid = true;
+ if (*upages != upages_us)
+ *upages = upages_us;
return (0);
}
static void
-nvme_user_ioctl_free(vm_page_t *pages, int npage)
+nvme_user_ioctl_free(vm_page_t *pages, int npage, bool freeit)
{
vm_page_unhold_pages(pages, npage);
+ if (freeit)
+ free(pages, M_NVME);
}
static void
@@ -1400,7 +1422,8 @@ nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
struct mtx *mtx;
int ret = 0;
int npages = 0;
- vm_page_t upages[NVME_MAX_PAGES];
+ vm_page_t upages_small[NVME_MAX_PAGES];
+ vm_page_t *upages = upages_small;
if (pt->len > 0) {
if (pt->len > ctrlr->max_xfer_size) {
@@ -1411,7 +1434,7 @@ nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
}
if (is_user) {
ret = nvme_user_ioctl_req((vm_offset_t)pt->buf, pt->len,
- pt->is_read, upages, nitems(upages), &npages, &req,
+ pt->is_read, &upages, nitems(upages_small), &npages, &req,
nvme_pt_done, pt);
if (ret != 0)
return (ret);
@@ -1449,7 +1472,7 @@ nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
mtx_unlock(mtx);
if (npages > 0)
- nvme_user_ioctl_free(upages, npages);
+ nvme_user_ioctl_free(upages, npages, upages != upages_small);
return (ret);
}
@@ -1477,7 +1500,8 @@ nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr,
struct mtx *mtx;
int ret = 0;
int npages = 0;
- vm_page_t upages[NVME_MAX_PAGES];
+ vm_page_t upages_small[NVME_MAX_PAGES];
+ vm_page_t *upages = upages_small;
/*
* We don't support metadata.
@@ -1494,8 +1518,8 @@ nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr,
}
if (is_user) {
ret = nvme_user_ioctl_req(npc->addr, npc->data_len,
- npc->opcode & 0x1, upages, nitems(upages), &npages,
- &req, nvme_npc_done, npc);
+ npc->opcode & 0x1, &upages, nitems(upages_small),
+ &npages, &req, nvme_npc_done, npc);
if (ret != 0)
return (ret);
} else
@@ -1533,7 +1557,7 @@ nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr,
mtx_unlock(mtx);
if (npages > 0)
- nvme_user_ioctl_free(upages, npages);
+ nvme_user_ioctl_free(upages, npages, upages != upages_small);
return (ret);
}
diff --git a/sys/dev/nvme/nvme_pci.c b/sys/dev/nvme/nvme_pci.c
index 74191df52058..55cba580d6ca 100644
--- a/sys/dev/nvme/nvme_pci.c
+++ b/sys/dev/nvme/nvme_pci.c
@@ -223,10 +223,6 @@ nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
}
}
- ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
- ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
- ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;
-
return (0);
}
diff --git a/sys/dev/nvme/nvme_private.h b/sys/dev/nvme/nvme_private.h
index 3dc62bc11384..32c8cf91c1db 100644
--- a/sys/dev/nvme/nvme_private.h
+++ b/sys/dev/nvme/nvme_private.h
@@ -225,8 +225,6 @@ struct nvme_controller {
#define QUIRK_INTEL_ALIGNMENT 4 /* Pre NVMe 1.3 performance alignment */
#define QUIRK_AHCI 8 /* Attached via AHCI redirect */
- bus_space_tag_t bus_tag;
- bus_space_handle_t bus_handle;
int resource_id;
struct resource *resource;
@@ -286,8 +284,6 @@ struct nvme_controller {
struct nvme_qpair adminq;
struct nvme_qpair *ioq;
- struct nvme_registers *regs;
-
struct nvme_controller_data cdata;
struct nvme_namespace ns[NVME_MAX_NAMESPACES];
@@ -330,20 +326,17 @@ struct nvme_controller {
offsetof(struct nvme_registers, reg)
#define nvme_mmio_read_4(sc, reg) \
- bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, \
- nvme_mmio_offsetof(reg))
+ bus_read_4((sc)->resource, nvme_mmio_offsetof(reg))
#define nvme_mmio_write_4(sc, reg, val) \
- bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
- nvme_mmio_offsetof(reg), val)
+ bus_write_4((sc)->resource, nvme_mmio_offsetof(reg), val)
#define nvme_mmio_write_8(sc, reg, val) \
do { \
- bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
- nvme_mmio_offsetof(reg), val & 0xFFFFFFFF); \
- bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
- nvme_mmio_offsetof(reg)+4, \
- (val & 0xFFFFFFFF00000000ULL) >> 32); \
+ bus_write_4((sc)->resource, nvme_mmio_offsetof(reg), \
+ (val) & 0xFFFFFFFF); \
+ bus_write_4((sc)->resource, nvme_mmio_offsetof(reg) + 4, \
+ ((val) & 0xFFFFFFFF00000000ULL) >> 32); \
} while (0);
#define nvme_printf(ctrlr, fmt, args...) \
diff --git a/sys/dev/nvme/nvme_qpair.c b/sys/dev/nvme/nvme_qpair.c
index f7dc231d74df..e31bf818ed35 100644
--- a/sys/dev/nvme/nvme_qpair.c
+++ b/sys/dev/nvme/nvme_qpair.c
@@ -476,8 +476,8 @@ _nvme_qpair_process_completions(struct nvme_qpair *qpair)
}
if (done) {
- bus_space_write_4(qpair->ctrlr->bus_tag, qpair->ctrlr->bus_handle,
- qpair->cq_hdbl_off, qpair->cq_head);
+ bus_write_4(qpair->ctrlr->resource, qpair->cq_hdbl_off,
+ qpair->cq_head);
}
return (done);
@@ -1068,8 +1068,7 @@ nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr)
bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- bus_space_write_4(ctrlr->bus_tag, ctrlr->bus_handle,
- qpair->sq_tdbl_off, qpair->sq_tail);
+ bus_write_4(ctrlr->resource, qpair->sq_tdbl_off, qpair->sq_tail);
qpair->num_cmds++;
}
diff --git a/sys/dev/nvme/nvme_sim.c b/sys/dev/nvme/nvme_sim.c
index a4bb5df4e456..b9f09c8d1f61 100644
--- a/sys/dev/nvme/nvme_sim.c
+++ b/sys/dev/nvme/nvme_sim.c
@@ -496,4 +496,4 @@ static driver_t nvme_sim_driver = {
};
DRIVER_MODULE(nvme_sim, nvme, nvme_sim_driver, NULL, NULL);
-MODULE_VERSION(nvme_shim, 1);
+MODULE_VERSION(nvme_sim, 1);
diff --git a/sys/dev/nvmf/controller/nvmft_controller.c b/sys/dev/nvmf/controller/nvmft_controller.c
index 1618c1f96dac..4c1b28b89265 100644
--- a/sys/dev/nvmf/controller/nvmft_controller.c
+++ b/sys/dev/nvmf/controller/nvmft_controller.c
@@ -227,7 +227,7 @@ nvmft_handoff_io_queue(struct nvmft_port *np, enum nvmf_trtype trtype,
if (ctrlr == NULL) {
mtx_unlock(&np->lock);
printf("NVMFT: Nonexistent controller %u for I/O queue %u from %.*s\n",
- ctrlr->cntlid, qid, (int)sizeof(data->hostnqn),
+ cntlid, qid, (int)sizeof(data->hostnqn),
data->hostnqn);
nvmft_connect_invalid_parameters(qp, cmd, true,
offsetof(struct nvmf_fabric_connect_data, cntlid));
diff --git a/sys/dev/ofw/openfirm.c b/sys/dev/ofw/openfirm.c
index b5f58b86a9c3..a8d3786ed152 100644
--- a/sys/dev/ofw/openfirm.c
+++ b/sys/dev/ofw/openfirm.c
@@ -381,11 +381,11 @@ OF_getproplen(phandle_t package, const char *propname)
}
/* Check existence of a property of a package. */
-int
+bool
OF_hasprop(phandle_t package, const char *propname)
{
- return (OF_getproplen(package, propname) >= 0 ? 1 : 0);
+ return (OF_getproplen(package, propname) >= 0);
}
/* Get the value of a property of a package. */
diff --git a/sys/dev/ofw/openfirm.h b/sys/dev/ofw/openfirm.h
index 4e2b035827cb..fd13f4abd29e 100644
--- a/sys/dev/ofw/openfirm.h
+++ b/sys/dev/ofw/openfirm.h
@@ -108,7 +108,7 @@ ssize_t OF_getprop(phandle_t node, const char *propname, void *buf,
size_t len);
ssize_t OF_getencprop(phandle_t node, const char *prop, pcell_t *buf,
size_t len); /* Same as getprop, but maintains endianness */
-int OF_hasprop(phandle_t node, const char *propname);
+bool OF_hasprop(phandle_t node, const char *propname);
ssize_t OF_searchprop(phandle_t node, const char *propname, void *buf,
size_t len);
ssize_t OF_searchencprop(phandle_t node, const char *propname,
diff --git a/sys/dev/pci/pcireg.h b/sys/dev/pci/pcireg.h
index f6aaf30611e4..3ec7879b8a09 100644
--- a/sys/dev/pci/pcireg.h
+++ b/sys/dev/pci/pcireg.h
@@ -464,6 +464,7 @@
#define PCIP_SERIALBUS_USB_OHCI 0x10
#define PCIP_SERIALBUS_USB_EHCI 0x20
#define PCIP_SERIALBUS_USB_XHCI 0x30
+#define PCIP_SERIALBUS_USB_USB4 0x40
#define PCIP_SERIALBUS_USB_DEVICE 0xfe
#define PCIS_SERIALBUS_FC 0x04
#define PCIS_SERIALBUS_SMBUS 0x05
diff --git a/sys/dev/qcom_clk/qcom_clk_rcg2.c b/sys/dev/qcom_clk/qcom_clk_rcg2.c
index 0407706dd138..6a1962982184 100644
--- a/sys/dev/qcom_clk/qcom_clk_rcg2.c
+++ b/sys/dev/qcom_clk/qcom_clk_rcg2.c
@@ -370,7 +370,7 @@ qcom_clk_rcg2_set_freq(struct clknode *clk, uint64_t fin, uint64_t *fout,
device_printf(clknode_get_device(sc->clknode),
"%s: no suitable freqtbl entry found for freq %llu\n",
__func__,
- *fout);
+ (unsigned long long) *fout);
return (ERANGE);
}
@@ -475,7 +475,7 @@ qcom_clk_rcg2_set_freq(struct clknode *clk, uint64_t fin, uint64_t *fout,
*fout,
f->parent,
f->freq,
- p_freq);
+ (unsigned long long) p_freq);
/*
* To ensure glitch-free operation on some clocks, set it to
@@ -547,7 +547,7 @@ qcom_clk_rcg2_set_freq(struct clknode *clk, uint64_t fin, uint64_t *fout,
"%llu\n",
__func__,
f->parent,
- p_freq);
+ (unsigned long long) p_freq);
return (ENXIO);
}
@@ -570,7 +570,7 @@ qcom_clk_rcg2_set_freq(struct clknode *clk, uint64_t fin, uint64_t *fout,
*fout,
f->freq,
f->parent,
- p_freq);
+ (unsigned long long) p_freq);
/*
* Set the parent node, the parent programming and the divisor
diff --git a/sys/dev/qcom_gcc/qcom_gcc_clock.c b/sys/dev/qcom_gcc/qcom_gcc_clock.c
index c8c10b0c5172..f51b4021a821 100644
--- a/sys/dev/qcom_gcc/qcom_gcc_clock.c
+++ b/sys/dev/qcom_gcc/qcom_gcc_clock.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2025, Adrian Chadd <adrian@FreeBSD.org>
+ * Copyright (c) 2026 Adrian Chadd <adrian@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -37,6 +37,7 @@
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
+#include <sys/rman.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
diff --git a/sys/dev/qcom_gcc/qcom_gcc_ipq4018_reset.c b/sys/dev/qcom_gcc/qcom_gcc_ipq4018_reset.c
index f99d1d9ad9f1..127ca944c77e 100644
--- a/sys/dev/qcom_gcc/qcom_gcc_ipq4018_reset.c
+++ b/sys/dev/qcom_gcc/qcom_gcc_ipq4018_reset.c
@@ -38,6 +38,7 @@
#include <machine/bus.h>
#include <machine/resource.h>
+#include <sys/rman.h>
#include <sys/bus.h>
#include <dev/fdt/fdt_common.h>
@@ -136,7 +137,8 @@ qcom_gcc_ipq4018_hwreset_assert(device_t dev, intptr_t id, bool reset)
sc = device_get_softc(dev);
if (id > nitems(gcc_ipq4019_reset_list)) {
- device_printf(dev, "%s: invalid id (%d)\n", __func__, id);
+ device_printf(dev, "%s: invalid id (%d)\n", __func__,
+ (uint32_t) id);
return (EINVAL);
}
@@ -160,7 +162,8 @@ qcom_gcc_ipq4018_hwreset_is_asserted(device_t dev, intptr_t id, bool *reset)
sc = device_get_softc(dev);
if (id > nitems(gcc_ipq4019_reset_list)) {
- device_printf(dev, "%s: invalid id (%d)\n", __func__, id);
+ device_printf(dev, "%s: invalid id (%d)\n", __func__,
+ (uint32_t) id);
return (EINVAL);
}
mtx_lock(&sc->mtx);
@@ -171,7 +174,7 @@ qcom_gcc_ipq4018_hwreset_is_asserted(device_t dev, intptr_t id, bool *reset)
*reset = false;
mtx_unlock(&sc->mtx);
- device_printf(dev, "called; id=%d\n", id);
+ device_printf(dev, "called; id=%d\n", (uint32_t) id);
return (0);
}
diff --git a/sys/dev/qcom_gcc/qcom_gcc_main.c b/sys/dev/qcom_gcc/qcom_gcc_main.c
index 3950bd985feb..38f409827541 100644
--- a/sys/dev/qcom_gcc/qcom_gcc_main.c
+++ b/sys/dev/qcom_gcc/qcom_gcc_main.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2025, Adrian Chadd <adrian@FreeBSD.org>
+ * Copyright (c) 2026 Adrian Chadd <adrian@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -39,6 +39,7 @@
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
+#include <sys/rman.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
@@ -51,6 +52,7 @@
#include "qcom_gcc_var.h"
#include "qcom_gcc_ipq4018.h"
+#include "qcom_gcc_msm8916.h"
static int qcom_gcc_modevent(module_t, int, void *);
@@ -67,6 +69,8 @@ struct qcom_gcc_chipset_list_entry {
static struct qcom_gcc_chipset_list_entry qcom_gcc_chipset_list[] = {
{ "qcom,gcc-ipq4019", "Qualcomm IPQ4018 Clock/Reset Controller",
QCOM_GCC_CHIPSET_IPQ4018 },
+ { "qcom,gcc-msm8916", "Qualcomm MSM8916 Clock/Reset Controller",
+ QCOM_GCC_CHIPSET_MSM8916 },
{ NULL, NULL, 0 },
};
@@ -135,6 +139,10 @@ qcom_gcc_attach(device_t dev)
qcom_gcc_ipq4018_hwreset_init(sc);
mem_sz = 0x60000;
break;
+ case QCOM_GCC_CHIPSET_MSM8916:
+ qcom_gcc_msm8916_hwreset_init(sc);
+ mem_sz = 0x0;
+ break;
case QCOM_GCC_CHIPSET_NONE:
device_printf(dev, "Invalid chipset (%d)\n", sc->sc_chipset);
return (ENXIO);
@@ -142,8 +150,13 @@ qcom_gcc_attach(device_t dev)
sc->reg_rid = 0;
- sc->reg = bus_alloc_resource_anywhere(dev, SYS_RES_MEMORY,
- &sc->reg_rid, mem_sz, RF_ACTIVE);
+ if (mem_sz != 0)
+ sc->reg = bus_alloc_resource_anywhere(dev, SYS_RES_MEMORY,
+ &sc->reg_rid, mem_sz, RF_ACTIVE);
+ else
+ sc->reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &sc->reg_rid, RF_ACTIVE);
+
if (sc->reg == NULL) {
device_printf(dev, "Couldn't allocate memory resource!\n");
return (ENXIO);
@@ -163,6 +176,9 @@ qcom_gcc_attach(device_t dev)
case QCOM_GCC_CHIPSET_IPQ4018:
qcom_gcc_ipq4018_clock_setup(sc);
break;
+ case QCOM_GCC_CHIPSET_MSM8916:
+ qcom_gcc_msm8916_clock_setup(sc);
+ break;
case QCOM_GCC_CHIPSET_NONE:
device_printf(dev, "Invalid chipset (%d)\n", sc->sc_chipset);
return (ENXIO);
diff --git a/sys/dev/qcom_gcc/qcom_gcc_msm8916.h b/sys/dev/qcom_gcc/qcom_gcc_msm8916.h
new file mode 100644
index 000000000000..10758b0744a4
--- /dev/null
+++ b/sys/dev/qcom_gcc/qcom_gcc_msm8916.h
@@ -0,0 +1,41 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2026 Adrian Chadd <adrian@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __QCOM_GCC_MSM8916_H__
+#define __QCOM_GCC_MSM8916_H__
+
+/*
+ * reset block
+ */
+extern void qcom_gcc_msm8916_hwreset_init(struct qcom_gcc_softc *);
+
+/*
+ * clock block
+ */
+extern void qcom_gcc_msm8916_clock_setup(struct qcom_gcc_softc *);
+
+#endif /* __QCOM_GCC_MSM8916_H__ */
diff --git a/sys/dev/qcom_gcc/qcom_gcc_msm8916_clock.c b/sys/dev/qcom_gcc/qcom_gcc_msm8916_clock.c
new file mode 100644
index 000000000000..5c0e4afcc17f
--- /dev/null
+++ b/sys/dev/qcom_gcc/qcom_gcc_msm8916_clock.c
@@ -0,0 +1,84 @@
+/*-
+ * Copyright (c) 2018 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by BAE Systems, the University of Cambridge
+ * Computer Laboratory, and Memorial University under DARPA/AFRL contract
+ * FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent Computing
+ * (TC) research program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kthread.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <machine/bus.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include "qcom_gcc_var.h"
+#include "qcom_gcc_msm8916.h"
+
+#define GCC_QDSS_BCR 0x29000
+#define GCC_QDSS_BCR_BLK_ARES (1 << 0) /* Async software reset. */
+#define GCC_QDSS_CFG_AHB_CBCR 0x29008
+#define AHB_CBCR_CLK_ENABLE (1 << 0) /* AHB clk branch ctrl */
+#define GCC_QDSS_ETR_USB_CBCR 0x29028
+#define ETR_USB_CBCR_CLK_ENABLE (1 << 0) /* ETR USB clk branch ctrl */
+#define GCC_QDSS_DAP_CBCR 0x29084
+#define DAP_CBCR_CLK_ENABLE (1 << 0) /* DAP clk branch ctrl */
+
+/*
+ * Qualcomm Debug Subsystem (QDSS)
+ * block enabling routine.
+ */
+static void
+qcom_msm8916_qdss_enable(struct qcom_gcc_softc *sc)
+{
+
+ /* Put QDSS block to reset */
+ bus_write_4(sc->reg, GCC_QDSS_BCR, GCC_QDSS_BCR_BLK_ARES);
+
+ /* Enable AHB clock branch */
+ bus_write_4(sc->reg, GCC_QDSS_CFG_AHB_CBCR, AHB_CBCR_CLK_ENABLE);
+
+ /* Enable DAP clock branch */
+ bus_write_4(sc->reg, GCC_QDSS_DAP_CBCR, DAP_CBCR_CLK_ENABLE);
+
+ /* Enable ETR USB clock branch */
+ bus_write_4(sc->reg, GCC_QDSS_ETR_USB_CBCR, ETR_USB_CBCR_CLK_ENABLE);
+
+ /* Out of reset */
+ bus_write_4(sc->reg, GCC_QDSS_BCR, 0);
+}
+
+void
+qcom_gcc_msm8916_clock_setup(struct qcom_gcc_softc *sc)
+{
+ qcom_msm8916_qdss_enable(sc);
+}
diff --git a/sys/dev/qcom_gcc/qcom_gcc_msm8916_reset.c b/sys/dev/qcom_gcc/qcom_gcc_msm8916_reset.c
new file mode 100644
index 000000000000..c83fd3e981ab
--- /dev/null
+++ b/sys/dev/qcom_gcc/qcom_gcc_msm8916_reset.c
@@ -0,0 +1,71 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2026 Adrian Chadd <adrian@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/sglist.h>
+#include <sys/random.h>
+#include <sys/stdatomic.h>
+#include <sys/mutex.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/bus.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/hwreset/hwreset.h>
+
+#include "hwreset_if.h"
+
+#include "qcom_gcc_var.h"
+#include "qcom_gcc_msm8916.h"
+
+static int
+qcom_gcc_msm8916_hwreset_assert(device_t dev, intptr_t id, bool reset)
+{
+ device_printf(dev, "%s: invalid id (%u)\n", __func__, (uint32_t) id);
+ return (EINVAL);
+}
+
+static int
+qcom_gcc_msm8916_hwreset_is_asserted(device_t dev, intptr_t id, bool *reset)
+{
+ device_printf(dev, "%s: invalid id (%u)\n", __func__, (uint32_t) id);
+ return (EINVAL);
+}
+
+void
+qcom_gcc_msm8916_hwreset_init(struct qcom_gcc_softc *sc)
+{
+ sc->sc_cb.hw_reset_assert = qcom_gcc_msm8916_hwreset_assert;
+ sc->sc_cb.hw_reset_is_asserted = qcom_gcc_msm8916_hwreset_is_asserted;
+}
diff --git a/sys/dev/qcom_gcc/qcom_gcc_var.h b/sys/dev/qcom_gcc/qcom_gcc_var.h
index 2d4e969e1134..e3796f0d5f0f 100644
--- a/sys/dev/qcom_gcc/qcom_gcc_var.h
+++ b/sys/dev/qcom_gcc/qcom_gcc_var.h
@@ -31,6 +31,7 @@
typedef enum {
QCOM_GCC_CHIPSET_NONE = 0,
QCOM_GCC_CHIPSET_IPQ4018 = 1,
+ QCOM_GCC_CHIPSET_MSM8916 = 2,
} qcom_gcc_chipset_t;
struct qcom_gcc_reset_entry {
diff --git a/sys/dev/rge/if_rge.c b/sys/dev/rge/if_rge.c
index b2f1311b4c87..8887e8d39ae4 100644
--- a/sys/dev/rge/if_rge.c
+++ b/sys/dev/rge/if_rge.c
@@ -103,12 +103,7 @@ static void rge_tx_task(void *, int);
static void rge_txq_flush_mbufs(struct rge_softc *sc);
static void rge_tick(void *);
static void rge_link_state(struct rge_softc *);
-#if 0
-#ifndef SMALL_KERNEL
-int rge_wol(struct ifnet *, int);
-void rge_wol_power(struct rge_softc *);
-#endif
-#endif
+static void rge_setwol(struct rge_softc *);
struct rge_matchid {
uint16_t vendor;
@@ -161,7 +156,11 @@ rge_attach_if(struct rge_softc *sc, const char *eaddr)
if_setcapabilities(sc->sc_ifp, IFCAP_HWCSUM);
if_setcapenable(sc->sc_ifp, if_getcapabilities(sc->sc_ifp));
- /* TODO: set WOL */
+ /* Enable WOL if PM is supported. */
+ if (pci_has_pm(sc->sc_dev)) {
+ if_setcapabilitiesbit(sc->sc_ifp, IFCAP_WOL_MAGIC, 0);
+ if_setcapenablebit(sc->sc_ifp, IFCAP_WOL_MAGIC, 0);
+ }
/* Attach interface */
ether_ifattach(sc->sc_ifp, eaddr);
@@ -446,23 +445,19 @@ rge_attach(device_t dev)
rge_config_imtype(sc, RGE_IMTYPE_SIM);
- /* TODO: disable ASPM/ECPM? */
-
-#if 0
- /*
- * PCI Express check.
- */
- if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
- &offset, NULL)) {
- /* Disable PCIe ASPM and ECPM. */
- reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
- offset + PCI_PCIE_LCSR);
- reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1 |
- PCI_PCIE_LCSR_ECPM);
- pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCI_PCIE_LCSR,
- reg);
+ /* Disable PCIe ASPM and ECPM if requested. */
+ if (sc->sc_disable_aspm) {
+ int ecap;
+ if (pci_find_cap(dev, PCIY_EXPRESS, &ecap) == 0) {
+ uint16_t lctl;
+ lctl = pci_read_config(dev,
+ ecap + PCIER_LINK_CTL, 2);
+ lctl &= ~(PCIEM_LINK_CTL_ASPMC |
+ PCIEM_LINK_CTL_ECPM);
+ pci_write_config(dev,
+ ecap + PCIER_LINK_CTL, lctl, 2);
+ }
}
-#endif
RGE_LOCK(sc);
if (rge_chipinit(sc)) {
@@ -654,26 +649,6 @@ rge_detach(device_t dev)
return (0);
}
-#if 0
-
-int
-rge_activate(struct device *self, int act)
-{
-#ifndef SMALL_KERNEL
- struct rge_softc *sc = (struct rge_softc *)self;
-#endif
-
- switch (act) {
- case DVACT_POWERDOWN:
-#ifndef SMALL_KERNEL
- rge_wol_power(sc);
-#endif
- break;
- }
- return (0);
-}
-#endif
-
static void
rge_intr_msi(void *arg)
{
@@ -959,29 +934,24 @@ rge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
case SIOCSIFFLAGS:
RGE_LOCK(sc);
if ((if_getflags(ifp) & IFF_UP) != 0) {
- if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
- /*
- * TODO: handle promisc/iffmulti changing
- * without reprogramming everything.
- */
+ if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
+ if (((if_getflags(ifp) ^ sc->rge_if_flags)
+ & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
+ rge_iff_locked(sc);
+ } else
rge_init_locked(sc);
- } else {
- /* Reinit promisc/multi just in case */
- rge_iff_locked(sc);
- }
} else {
- if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
+ if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
rge_stop_locked(sc);
- }
}
+ sc->rge_if_flags = if_getflags(ifp);
RGE_UNLOCK(sc);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
RGE_LOCK(sc);
- if ((if_getflags(ifp) & IFF_DRV_RUNNING) != 0) {
+ if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
rge_iff_locked(sc);
- }
RGE_UNLOCK(sc);
break;
case SIOCGIFMEDIA:
@@ -1019,7 +989,9 @@ rge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
reinit = 1;
}
- /* TODO: WOL */
+ if ((mask & IFCAP_WOL_MAGIC) != 0 &&
+ (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0)
+ if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
if ((mask & IFCAP_RXCSUM) != 0 &&
(if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) {
@@ -1125,7 +1097,7 @@ rge_init_locked(struct rge_softc *sc)
* causing this to be initialised both from the ioctl
* API and if_init() API.
*/
-// RGE_PRINT_ERROR(sc, "%s: called whilst running?\n", __func__);
+/* RGE_PRINT_ERROR(sc, "%s: called whilst running?\n", __func__); */
return;
}
@@ -2104,9 +2076,10 @@ rge_rxeof(struct rge_queues *q, struct mbufq *mq)
uint32_t rxstat, extsts;
int i, mlen, rx = 0;
int cons, prod;
- int maxpkt = 16; /* XXX TODO: make this a tunable */
+ int maxpkt;
bool check_hwcsum;
+ maxpkt = sc->sc_rx_process_limit;
check_hwcsum = ((if_getcapenable(sc->sc_ifp) & IFCAP_RXCSUM) != 0);
RGE_ASSERT_LOCKED(sc);
@@ -2409,7 +2382,7 @@ rge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
uint32_t crc, *hashes = arg;
- // XXX TODO: validate this does addrlo? */
+ /* XXX TODO: validate this does addrlo? */
crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
crc &= 0x3f;
@@ -2624,6 +2597,22 @@ rge_link_state(struct rge_softc *sc)
}
}
+static void
+rge_setwol(struct rge_softc *sc)
+{
+ if_t ifp = sc->sc_ifp;
+ int enable;
+
+ mtx_assert(&sc->sc_mtx, MA_OWNED);
+
+ if (!pci_has_pm(sc->sc_dev))
+ return;
+
+ enable = (if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0;
+
+ rge_wol_config(sc, enable);
+}
+
/**
* @brief Suspend
*/
@@ -2634,7 +2623,7 @@ rge_suspend(device_t dev)
RGE_LOCK(sc);
rge_stop_locked(sc);
- /* TODO: wake on lan */
+ rge_setwol(sc);
sc->sc_suspended = true;
RGE_UNLOCK(sc);
@@ -2650,7 +2639,6 @@ rge_resume(device_t dev)
struct rge_softc *sc = device_get_softc(dev);
RGE_LOCK(sc);
- /* TODO: wake on lan */
/* reinit if required */
if (if_getflags(sc->sc_ifp) & IFF_UP)
@@ -2673,6 +2661,7 @@ rge_shutdown(device_t dev)
RGE_LOCK(sc);
rge_stop_locked(sc);
+ rge_setwol(sc);
RGE_UNLOCK(sc);
return (0);
diff --git a/sys/dev/rge/if_rge_hw.c b/sys/dev/rge/if_rge_hw.c
index 35a0e93dd193..ba01e389af14 100644
--- a/sys/dev/rge/if_rge_hw.c
+++ b/sys/dev/rge/if_rge_hw.c
@@ -2196,50 +2196,37 @@ rge_get_link_status(struct rge_softc *sc)
return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
}
-#if 0
-#ifndef SMALL_KERNEL
-int
-rge_wol(struct ifnet *ifp, int enable)
+void
+rge_wol_config(struct rge_softc *sc, int enable)
{
- struct rge_softc *sc = ifp->if_softc;
-
- if (enable) {
- if (!(RGE_READ_1(sc, RGE_CFG1) & RGE_CFG1_PM_EN)) {
- printf("%s: power management is disabled, "
- "cannot do WOL\n", sc->sc_dev.dv_xname);
- return (ENOTSUP);
- }
-
- }
-
- rge_iff(sc);
-
if (enable)
RGE_MAC_SETBIT(sc, 0xc0b6, 0x0001);
else
RGE_MAC_CLRBIT(sc, 0xc0b6, 0x0001);
+ /* Enable config register write. */
RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
- RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_WOL_LANWAKE | RGE_CFG5_WOL_UCAST |
- RGE_CFG5_WOL_MCAST | RGE_CFG5_WOL_BCAST);
+
+ /* Clear all WOL bits, then set as requested. */
RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_WOL_LINK | RGE_CFG3_WOL_MAGIC);
- if (enable)
+ RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_WOL_LANWAKE |
+ RGE_CFG5_WOL_UCAST | RGE_CFG5_WOL_MCAST | RGE_CFG5_WOL_BCAST);
+ if (enable) {
+ RGE_SETBIT_1(sc, RGE_CFG3, RGE_CFG3_WOL_MAGIC);
RGE_SETBIT_1(sc, RGE_CFG5, RGE_CFG5_WOL_LANWAKE);
- RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
+ }
- return (0);
-}
+ /* Config register write done. */
+ RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
-void
-rge_wol_power(struct rge_softc *sc)
-{
- /* Disable RXDV gate. */
- RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
- DELAY(2000);
+ if (enable) {
+ /* Disable RXDV gate so WOL packets can reach the NIC. */
+ RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
+ DELAY(2000);
- RGE_SETBIT_1(sc, RGE_CFG1, RGE_CFG1_PM_EN);
- RGE_SETBIT_1(sc, RGE_CFG2, RGE_CFG2_PMSTS_EN);
+ /* Enable power management. */
+ RGE_SETBIT_1(sc, RGE_CFG1, RGE_CFG1_PM_EN);
+ RGE_SETBIT_1(sc, RGE_CFG2, RGE_CFG2_PMSTS_EN);
+ }
}
-#endif
-#endif
diff --git a/sys/dev/rge/if_rge_hw.h b/sys/dev/rge/if_rge_hw.h
index 86f0da7c87b3..4e6ee5f1975f 100644
--- a/sys/dev/rge/if_rge_hw.h
+++ b/sys/dev/rge/if_rge_hw.h
@@ -37,5 +37,6 @@ extern uint16_t rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
extern void rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
extern uint16_t rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg);
extern int rge_get_link_status(struct rge_softc *);
+extern void rge_wol_config(struct rge_softc *, int);
#endif /* __IF_RGE_HW_H__ */
diff --git a/sys/dev/rge/if_rge_sysctl.c b/sys/dev/rge/if_rge_sysctl.c
index a7d6e1572168..75e2316042ea 100644
--- a/sys/dev/rge/if_rge_sysctl.c
+++ b/sys/dev/rge/if_rge_sysctl.c
@@ -232,6 +232,16 @@ rge_sysctl_attach(struct rge_softc *sc)
"debug", CTLFLAG_RW, &sc->sc_debug, 0,
"control debugging printfs");
+ sc->sc_rx_process_limit = 16;
+ SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "rx_process_limit", CTLFLAG_RW, &sc->sc_rx_process_limit, 0,
+ "max number of RX packets to process per interrupt");
+
+ sc->sc_disable_aspm = 0;
+ SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "disable_aspm", CTLFLAG_RDTUN, &sc->sc_disable_aspm, 0,
+ "disable PCIe ASPM and ECPM (requires reboot)");
+
/* Stats */
rge_sysctl_drv_stats_attach(sc);
rge_sysctl_mac_stats_attach(sc);
diff --git a/sys/dev/rge/if_rgevar.h b/sys/dev/rge/if_rgevar.h
index 924133da45e3..2e80dcf42187 100644
--- a/sys/dev/rge/if_rgevar.h
+++ b/sys/dev/rge/if_rgevar.h
@@ -123,7 +123,6 @@ struct rge_rx {
int rge_rxq_prodidx;
int rge_rxq_considx;
-// struct if_rxring rge_rx_ring;
bus_addr_t rge_rx_list_paddr;
bus_dmamap_t rge_rx_list_map;
struct rge_rx_desc *rge_rx_list;
@@ -137,7 +136,6 @@ struct rge_queues {
void *q_ihc;
int q_index;
char q_name[16];
-// pci_intr_handle_t q_ih;
struct rge_tx q_tx;
struct rge_rx q_rx;
};
@@ -171,8 +169,6 @@ struct rge_softc {
bus_dma_tag_t sc_dmat_rx_buf;
bus_dma_tag_t sc_dmat_stats_buf;
-// pci_chipset_tag_t sc_pc;
-// pcitag_t sc_tag;
struct ifmedia sc_media; /* media info */
enum rge_mac_type rge_type;
@@ -204,8 +200,13 @@ struct rge_softc {
#define RGE_IMTYPE_SIM 1
int sc_watchdog;
+ int rge_if_flags;
+
uint32_t sc_debug;
+ int sc_rx_process_limit;
+ int sc_disable_aspm;
+
struct rge_drv_stats sc_drv_stats;
struct rge_mac_stats sc_mac_stats;
diff --git a/sys/dev/sound/midi/midi.c b/sys/dev/sound/midi/midi.c
index 4fd0e3dcf134..e70f76a44ed9 100644
--- a/sys/dev/sound/midi/midi.c
+++ b/sys/dev/sound/midi/midi.c
@@ -421,7 +421,7 @@ midi_write(struct cdev *i_dev, struct uio *uio, int ioflag)
int used;
char buf[MIDI_WSIZE];
- retval = 0;
+ retval = EIO;
if (m == NULL)
goto err0;
diff --git a/sys/dev/sound/pcm/ac97.c b/sys/dev/sound/pcm/ac97.c
index 14ff2f6a62ab..73a1e0280e56 100644
--- a/sys/dev/sound/pcm/ac97.c
+++ b/sys/dev/sound/pcm/ac97.c
@@ -125,12 +125,7 @@ static const struct ac97_vendorid ac97vendorid[] = {
{ 0x57454300, "Winbond" },
{ 0x574d4c00, "Wolfson" },
{ 0x594d4800, "Yamaha" },
- /*
- * XXX This is a fluke, really! The real vendor
- * should be SigmaTel, not this! This should be
- * removed someday!
- */
- { 0x01408300, "Creative" },
+ { 0x01408300, "SigmaTel" },
{ 0x00000000, NULL }
};
@@ -238,12 +233,7 @@ static struct ac97_codecid ac97codecid[] = {
{ 0x594d4800, 0x00, 0, "YMF743", 0 },
{ 0x594d4802, 0x00, 0, "YMF752", 0 },
{ 0x594d4803, 0x00, 0, "YMF753", 0 },
- /*
- * XXX This is a fluke, really! The real codec
- * should be STAC9704, not this! This should be
- * removed someday!
- */
- { 0x01408384, 0x00, 0, "EV1938", 0 },
+ { 0x01408384, 0x00, 0, "STAC9704", 0 },
{ 0, 0, 0, NULL, 0 }
};
@@ -1104,10 +1094,6 @@ ac97mix_uninit(struct snd_mixer *m)
if (codec == NULL)
return -1;
- /*
- if (ac97_uninitmixer(codec))
- return -1;
- */
ac97_destroy(codec);
return 0;
}
diff --git a/sys/dev/sound/pcm/buffer.c b/sys/dev/sound/pcm/buffer.c
index 1db9e5661dc8..0c574ae2908c 100644
--- a/sys/dev/sound/pcm/buffer.c
+++ b/sys/dev/sound/pcm/buffer.c
@@ -506,29 +506,11 @@ sndbuf_dispose(struct snd_dbuf *b, u_int8_t *to, unsigned int count)
return 0;
}
-#ifdef SND_DIAGNOSTIC
-static uint32_t snd_feeder_maxfeed = 0;
-SYSCTL_UINT(_hw_snd, OID_AUTO, feeder_maxfeed, CTLFLAG_RD,
- &snd_feeder_maxfeed, 0, "maximum feeder count request");
-
-static uint32_t snd_feeder_maxcycle = 0;
-SYSCTL_UINT(_hw_snd, OID_AUTO, feeder_maxcycle, CTLFLAG_RD,
- &snd_feeder_maxcycle, 0, "maximum feeder cycle");
-#endif
-
/* count is number of bytes we want added to destination buffer */
int
sndbuf_feed(struct snd_dbuf *from, struct snd_dbuf *to, struct pcm_channel *channel, struct pcm_feeder *feeder, unsigned int count)
{
unsigned int cnt, maxfeed;
-#ifdef SND_DIAGNOSTIC
- unsigned int cycle;
-
- if (count > snd_feeder_maxfeed)
- snd_feeder_maxfeed = count;
-
- cycle = 0;
-#endif
KASSERT(count > 0, ("can't feed 0 bytes"));
@@ -544,16 +526,8 @@ sndbuf_feed(struct snd_dbuf *from, struct snd_dbuf *to, struct pcm_channel *chan
break;
sndbuf_acquire(to, to->tmpbuf, cnt);
count -= cnt;
-#ifdef SND_DIAGNOSTIC
- cycle++;
-#endif
} while (count != 0);
-#ifdef SND_DIAGNOSTIC
- if (cycle > snd_feeder_maxcycle)
- snd_feeder_maxcycle = cycle;
-#endif
-
return (0);
}
diff --git a/sys/dev/sound/pcm/channel.c b/sys/dev/sound/pcm/channel.c
index b74f76fd21ca..c1e0d8d3bc52 100644
--- a/sys/dev/sound/pcm/channel.c
+++ b/sys/dev/sound/pcm/channel.c
@@ -143,7 +143,7 @@ chn_vpc_proc(int reset, int db)
PCM_ACQUIRE(d);
CHN_FOREACH(c, d, channels.pcm) {
CHN_LOCK(c);
- CHN_SETVOLUME(c, SND_VOL_C_PCM, SND_CHN_T_VOL_0DB, db);
+ chn_setvolume_matrix(c, SND_VOL_C_PCM, SND_CHN_T_VOL_0DB, db);
if (reset != 0)
chn_vpc_reset(c, SND_VOL_C_PCM, 1);
CHN_UNLOCK(c);
@@ -1059,7 +1059,7 @@ snd_afmt2str(uint32_t afmt, char *buf, size_t len)
int
chn_reset(struct pcm_channel *c, uint32_t fmt, uint32_t spd)
{
- int r;
+ int err;
CHN_LOCKASSERT(c);
c->feedcount = 0;
@@ -1071,23 +1071,26 @@ chn_reset(struct pcm_channel *c, uint32_t fmt, uint32_t spd)
c->flags |= (pcm_getflags(c->dev) & SD_F_BITPERFECT) ?
CHN_F_BITPERFECT : 0;
- r = CHANNEL_RESET(c->methods, c->devinfo);
- if (r == 0 && fmt != 0 && spd != 0) {
- r = chn_setparam(c, fmt, spd);
+ if ((err = CHANNEL_RESET(c->methods, c->devinfo)))
+ return (err);
+
+ if (fmt != 0 && spd != 0) {
+ if ((err = chn_setparam(c, fmt, spd)))
+ return (err);
fmt = 0;
spd = 0;
}
- if (r == 0 && fmt != 0)
- r = chn_setformat(c, fmt);
- if (r == 0 && spd != 0)
- r = chn_setspeed(c, spd);
- if (r == 0)
- r = chn_setlatency(c, chn_latency);
- if (r == 0) {
- chn_resetbuf(c);
- r = CHANNEL_RESETDONE(c->methods, c->devinfo);
- }
- return r;
+ if (fmt != 0 && (err = chn_setformat(c, fmt)))
+ return (err);
+ if (spd != 0 && (err = chn_setspeed(c, spd)))
+ return (err);
+
+ if ((err = chn_setlatency(c, chn_latency)))
+ return (err);
+
+ chn_resetbuf(c);
+
+ return (CHANNEL_RESETDONE(c->methods, c->devinfo));
}
static struct unrhdr *
@@ -1144,7 +1147,6 @@ chn_init(struct snddev_info *d, struct pcm_channel *parent, kobj_class_t cls,
int dir, void *devinfo)
{
struct pcm_channel *c;
- struct feeder_class *fc;
struct snd_dbuf *b, *bs;
char buf[CHN_NAMELEN];
int err, i, direction, *vchanrate, *vchanformat;
@@ -1217,17 +1219,6 @@ chn_init(struct snddev_info *d, struct pcm_channel *parent, kobj_class_t cls,
chn_vpc_reset(c, SND_VOL_C_PCM, 1);
CHN_UNLOCK(c);
- fc = feeder_getclass(FEEDER_ROOT);
- if (fc == NULL) {
- device_printf(d->dev, "%s(): failed to get feeder class\n",
- __func__);
- goto fail;
- }
- if (feeder_add(c, fc, NULL)) {
- device_printf(d->dev, "%s(): failed to add feeder\n", __func__);
- goto fail;
- }
-
b = sndbuf_create(c, "primary");
bs = sndbuf_create(c, "secondary");
if (b == NULL || bs == NULL) {
@@ -1647,7 +1638,7 @@ chn_vpc_reset(struct pcm_channel *c, int vc, int force)
return;
for (i = SND_CHN_T_BEGIN; i <= SND_CHN_T_END; i += SND_CHN_T_STEP)
- CHN_SETVOLUME(c, vc, i, c->volume[vc][SND_CHN_T_VOL_0DB]);
+ chn_setvolume_matrix(c, vc, i, c->volume[vc][SND_CHN_T_VOL_0DB]);
}
static u_int32_t
@@ -1760,19 +1751,6 @@ round_blksz(u_int32_t v, int round)
* aggressively through possibly real time programming technique.
*
*/
-#define CHN_LATENCY_PBLKCNT_REF \
- {{1, 2, 3, 4, 5, 5, 4, 3, 2, 1, 1}, \
- {1, 2, 3, 4, 5, 5, 4, 3, 2, 1, 1}}
-#define CHN_LATENCY_PBUFSZ_REF \
- {{7, 9, 12, 13, 14, 15, 15, 15, 15, 15, 16}, \
- {11, 12, 13, 14, 15, 16, 16, 16, 16, 16, 17}}
-
-#define CHN_LATENCY_RBLKCNT_REF \
- {{9, 8, 7, 6, 5, 5, 4, 3, 2, 1, 1}, \
- {9, 8, 7, 6, 5, 5, 4, 3, 2, 1, 1}}
-#define CHN_LATENCY_RBUFSZ_REF \
- {{14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 16}, \
- {15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 17}}
#define CHN_LATENCY_DATA_REF 192000 /* 48khz stereo 16bit ~ 48000 x 2 x 2 */
@@ -1780,14 +1758,22 @@ static int
chn_calclatency(int dir, int latency, int bps, u_int32_t datarate,
u_int32_t max, int *rblksz, int *rblkcnt)
{
- static int pblkcnts[CHN_LATENCY_PROFILE_MAX + 1][CHN_LATENCY_MAX + 1] =
- CHN_LATENCY_PBLKCNT_REF;
- static int pbufszs[CHN_LATENCY_PROFILE_MAX + 1][CHN_LATENCY_MAX + 1] =
- CHN_LATENCY_PBUFSZ_REF;
- static int rblkcnts[CHN_LATENCY_PROFILE_MAX + 1][CHN_LATENCY_MAX + 1] =
- CHN_LATENCY_RBLKCNT_REF;
- static int rbufszs[CHN_LATENCY_PROFILE_MAX + 1][CHN_LATENCY_MAX + 1] =
- CHN_LATENCY_RBUFSZ_REF;
+ static int pblkcnts[CHN_LATENCY_PROFILE_MAX + 1][CHN_LATENCY_MAX + 1] = {
+ {1, 2, 3, 4, 5, 5, 4, 3, 2, 1, 1},
+ {1, 2, 3, 4, 5, 5, 4, 3, 2, 1, 1}
+ };
+ static int pbufszs[CHN_LATENCY_PROFILE_MAX + 1][CHN_LATENCY_MAX + 1] = {
+ {7, 9, 12, 13, 14, 15, 15, 15, 15, 15, 16},
+ {11, 12, 13, 14, 15, 16, 16, 16, 16, 16, 17}
+ };
+ static int rblkcnts[CHN_LATENCY_PROFILE_MAX + 1][CHN_LATENCY_MAX + 1] = {
+ {9, 8, 7, 6, 5, 5, 4, 3, 2, 1, 1},
+ {9, 8, 7, 6, 5, 5, 4, 3, 2, 1, 1}
+ };
+ static int rbufszs[CHN_LATENCY_PROFILE_MAX + 1][CHN_LATENCY_MAX + 1] = {
+ {14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 16},
+ {15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 17}
+ };
u_int32_t bufsz;
int lprofile, blksz, blkcnt;
@@ -2361,33 +2347,9 @@ chn_notify(struct pcm_channel *c, u_int32_t flags)
* If the hwchan is running, we can't change its rate, format or
* blocksize
*/
- run = (CHN_STARTED(c)) ? 1 : 0;
+ run = CHN_STARTED(c);
if (run)
- flags &= CHN_N_VOLUME | CHN_N_TRIGGER;
-
- if (flags & CHN_N_RATE) {
- /*
- * XXX I'll make good use of this someday.
- * However this is currently being superseded by
- * the availability of CHN_F_VCHAN_DYNAMIC.
- */
- }
-
- if (flags & CHN_N_FORMAT) {
- /*
- * XXX I'll make good use of this someday.
- * However this is currently being superseded by
- * the availability of CHN_F_VCHAN_DYNAMIC.
- */
- }
-
- if (flags & CHN_N_VOLUME) {
- /*
- * XXX I'll make good use of this someday, though
- * soft volume control is currently pretty much
- * integrated.
- */
- }
+ flags &= CHN_N_TRIGGER;
if (flags & CHN_N_BLOCKSIZE) {
/*
@@ -2396,16 +2358,17 @@ chn_notify(struct pcm_channel *c, u_int32_t flags)
chn_setlatency(c, chn_latency);
}
- if ((flags & CHN_N_TRIGGER) && !(c->flags & CHN_F_VCHAN_DYNAMIC)) {
- nrun = CHN_EMPTY(c, children.busy) ? 0 : 1;
- if (nrun && !run)
- err = chn_start(c, 1);
- if (!nrun && run)
- chn_abort(c);
- flags &= ~CHN_N_TRIGGER;
- }
-
if (flags & CHN_N_TRIGGER) {
+ if (!(c->flags & CHN_F_VCHAN_DYNAMIC)) {
+ nrun = !CHN_EMPTY(c, children.busy);
+ if (nrun && !run)
+ err = chn_start(c, 1);
+ if (!nrun && run)
+ chn_abort(c);
+
+ return (err);
+ }
+
if (c->direction == PCMDIR_PLAY) {
vchanformat = &c->parentsnddev->pvchanformat;
vchanrate = &c->parentsnddev->pvchanrate;
@@ -2414,7 +2377,6 @@ chn_notify(struct pcm_channel *c, u_int32_t flags)
vchanrate = &c->parentsnddev->rvchanrate;
}
- /* Dynamic Virtual Channel */
if (!(c->flags & CHN_F_VCHAN_ADAPTIVE)) {
bestformat = *vchanformat;
bestspeed = *vchanrate;
@@ -2430,6 +2392,7 @@ chn_notify(struct pcm_channel *c, u_int32_t flags)
vpflags = 0;
CHN_FOREACH(ch, c, children.busy) {
+ nrun++;
CHN_LOCK(ch);
if ((ch->format & AFMT_PASSTHROUGH) &&
snd_fmtvalid(ch->format, caps->fmtlist)) {
@@ -2437,7 +2400,6 @@ chn_notify(struct pcm_channel *c, u_int32_t flags)
bestspeed = ch->speed;
CHN_UNLOCK(ch);
vpflags = CHN_F_PASSTHROUGH;
- nrun++;
break;
}
if ((ch->flags & CHN_F_EXCLUSIVE) && vpflags == 0) {
@@ -2452,13 +2414,11 @@ chn_notify(struct pcm_channel *c, u_int32_t flags)
}
CHN_UNLOCK(ch);
vpflags = CHN_F_EXCLUSIVE;
- nrun++;
continue;
}
if (!(c->flags & CHN_F_VCHAN_ADAPTIVE) ||
vpflags != 0) {
CHN_UNLOCK(ch);
- nrun++;
continue;
}
if (ch->speed > bestspeed) {
@@ -2469,7 +2429,6 @@ chn_notify(struct pcm_channel *c, u_int32_t flags)
besthwformat = snd_fmtbest(ch->format, caps->fmtlist);
if (!(besthwformat & AFMT_VCHAN)) {
CHN_UNLOCK(ch);
- nrun++;
continue;
}
if (AFMT_CHANNEL(besthwformat) >
@@ -2480,7 +2439,6 @@ chn_notify(struct pcm_channel *c, u_int32_t flags)
AFMT_BIT(besthwformat) > AFMT_BIT(bestformat))
bestformat = besthwformat;
CHN_UNLOCK(ch);
- nrun++;
}
if (bestformat == 0)
@@ -2500,17 +2458,16 @@ chn_notify(struct pcm_channel *c, u_int32_t flags)
c->devinfo, bestspeed);
err = chn_reset(c, bestformat, bestspeed);
}
- if (err == 0 && dirty) {
- CHN_FOREACH(ch, c, children.busy) {
- CHN_LOCK(ch);
- if (VCHAN_SYNC_REQUIRED(ch))
- vchan_sync(ch);
- CHN_UNLOCK(ch);
- }
- }
if (err == 0) {
- if (dirty)
+ if (dirty) {
+ CHN_FOREACH(ch, c, children.busy) {
+ CHN_LOCK(ch);
+ if (VCHAN_SYNC_REQUIRED(ch))
+ vchan_sync(ch);
+ CHN_UNLOCK(ch);
+ }
c->flags |= CHN_F_DIRTY;
+ }
err = chn_start(c, 1);
}
}
@@ -2527,8 +2484,6 @@ chn_notify(struct pcm_channel *c, u_int32_t flags)
vchan_sync(ch);
CHN_UNLOCK(ch);
}
- }
- if (err == 0) {
c->flags |= CHN_F_DIRTY;
err = chn_start(c, 1);
}
diff --git a/sys/dev/sound/pcm/channel.h b/sys/dev/sound/pcm/channel.h
index 6415f5c88984..6fa4338dce4a 100644
--- a/sys/dev/sound/pcm/channel.h
+++ b/sys/dev/sound/pcm/channel.h
@@ -255,7 +255,6 @@ struct pcm_channel {
#include "channel_if.h"
-int chn_reinit(struct pcm_channel *c);
int chn_write(struct pcm_channel *c, struct uio *buf);
int chn_read(struct pcm_channel *c, struct uio *buf);
u_int32_t chn_start(struct pcm_channel *c, int force);
@@ -306,15 +305,6 @@ int chn_notify(struct pcm_channel *c, u_int32_t flags);
int chn_getrates(struct pcm_channel *c, int **rates);
int chn_syncdestroy(struct pcm_channel *c);
-#define CHN_SETVOLUME(...) chn_setvolume_matrix(__VA_ARGS__)
-#if defined(SND_DIAGNOSTIC) || defined(INVARIANTS)
-#define CHN_GETVOLUME(...) chn_getvolume_matrix(__VA_ARGS__)
-#else
-#define CHN_GETVOLUME(x, y, z) ((x)->volume[y][z])
-#endif
-
-#define CHN_GETMUTE(x, y, z) ((x)->muted[y][z])
-
#ifdef OSSV4_EXPERIMENT
int chn_getpeaks(struct pcm_channel *c, int *lpeak, int *rpeak);
#endif
@@ -415,11 +405,8 @@ enum {
-#define CHN_N_RATE 0x00000001
-#define CHN_N_FORMAT 0x00000002
-#define CHN_N_VOLUME 0x00000004
-#define CHN_N_BLOCKSIZE 0x00000008
-#define CHN_N_TRIGGER 0x00000010
+#define CHN_N_BLOCKSIZE 0x00000001
+#define CHN_N_TRIGGER 0x00000002
#define CHN_LATENCY_MIN 0
#define CHN_LATENCY_MAX 10
diff --git a/sys/dev/sound/pcm/dsp.c b/sys/dev/sound/pcm/dsp.c
index c1e836691ac7..797bfba81023 100644
--- a/sys/dev/sound/pcm/dsp.c
+++ b/sys/dev/sound/pcm/dsp.c
@@ -607,8 +607,9 @@ dsp_ioctl_channel(struct dsp_cdevpriv *priv, struct pcm_channel *ch,
case MIXER_READ(0):
switch (j) {
case SOUND_MIXER_MUTE:
- mute = CHN_GETMUTE(ch, SND_VOL_C_PCM, SND_CHN_T_FL) ||
- CHN_GETMUTE(ch, SND_VOL_C_PCM, SND_CHN_T_FR);
+ mute = chn_getmute_matrix(ch,
+ SND_VOL_C_PCM, SND_CHN_T_FL) ||
+ chn_getmute_matrix(ch, SND_VOL_C_PCM, SND_CHN_T_FR);
if (ch->direction == PCMDIR_REC) {
*(int *)arg = mute << SOUND_MIXER_RECLEV;
} else {
@@ -618,17 +619,17 @@ dsp_ioctl_channel(struct dsp_cdevpriv *priv, struct pcm_channel *ch,
case SOUND_MIXER_PCM:
if (ch->direction != PCMDIR_PLAY)
break;
- *(int *)arg = CHN_GETVOLUME(ch,
+ *(int *)arg = chn_getvolume_matrix(ch,
SND_VOL_C_PCM, SND_CHN_T_FL);
- *(int *)arg |= CHN_GETVOLUME(ch,
+ *(int *)arg |= chn_getvolume_matrix(ch,
SND_VOL_C_PCM, SND_CHN_T_FR) << 8;
break;
case SOUND_MIXER_RECLEV:
if (ch->direction != PCMDIR_REC)
break;
- *(int *)arg = CHN_GETVOLUME(ch,
+ *(int *)arg = chn_getvolume_matrix(ch,
SND_VOL_C_PCM, SND_CHN_T_FL);
- *(int *)arg |= CHN_GETVOLUME(ch,
+ *(int *)arg |= chn_getvolume_matrix(ch,
SND_VOL_C_PCM, SND_CHN_T_FR) << 8;
break;
case SOUND_MIXER_DEVMASK:
diff --git a/sys/dev/sound/pcm/feeder.c b/sys/dev/sound/pcm/feeder.c
index 2a7f54e5d30f..af6e367895a0 100644
--- a/sys/dev/sound/pcm/feeder.c
+++ b/sys/dev/sound/pcm/feeder.c
@@ -181,7 +181,7 @@ feeder_find(struct pcm_channel *c, u_int32_t type)
#define score_val(s1) ((s1) & 0x3f00)
#define score_cse(s1) ((s1) & 0x7f)
-u_int32_t
+static u_int32_t
snd_fmtscore(u_int32_t fmt)
{
u_int32_t ret;
@@ -257,13 +257,13 @@ snd_fmtbestfunc(u_int32_t fmt, u_int32_t *fmts, int cheq)
return best;
}
-u_int32_t
+static u_int32_t
snd_fmtbestbit(u_int32_t fmt, u_int32_t *fmts)
{
return snd_fmtbestfunc(fmt, fmts, 0);
}
-u_int32_t
+static u_int32_t
snd_fmtbestchannel(u_int32_t fmt, u_int32_t *fmts)
{
return snd_fmtbestfunc(fmt, fmts, 1);
diff --git a/sys/dev/sound/pcm/feeder.h b/sys/dev/sound/pcm/feeder.h
index 1f106787ee83..e1e91d468455 100644
--- a/sys/dev/sound/pcm/feeder.h
+++ b/sys/dev/sound/pcm/feeder.h
@@ -63,9 +63,6 @@ struct pcm_feeder {
void feeder_register(void *p);
struct feeder_class *feeder_getclass(u_int32_t type);
-u_int32_t snd_fmtscore(u_int32_t fmt);
-u_int32_t snd_fmtbestbit(u_int32_t fmt, u_int32_t *fmts);
-u_int32_t snd_fmtbestchannel(u_int32_t fmt, u_int32_t *fmts);
u_int32_t snd_fmtbest(u_int32_t fmt, u_int32_t *fmts);
int feeder_add(struct pcm_channel *c, struct feeder_class *fc,
@@ -163,21 +160,3 @@ int feeder_matrix_oss_get_channel_order(struct pcmchan_matrix *,
unsigned long long *);
int feeder_matrix_oss_set_channel_order(struct pcmchan_matrix *,
unsigned long long *);
-
-/*
- * By default, various feeders only deal with sign 16/32 bit native-endian
- * since it should provide the fastest processing path. Processing 8bit samples
- * is too noisy due to limited dynamic range, while 24bit is quite slow due to
- * unnatural per-byte read/write. However, for debugging purposes, ensuring
- * implementation correctness and torture test, the following can be defined:
- *
- * SND_FEEDER_MULTIFORMAT - Compile all type of converters, but force
- * 8bit samples to be converted to 16bit
- * native-endian for better dynamic range.
- * Process 24bit samples natively.
- * SND_FEEDER_FULL_MULTIFORMAT - Ditto, but process 8bit samples natively.
- */
-#ifdef SND_FEEDER_FULL_MULTIFORMAT
-#undef SND_FEEDER_MULTIFORMAT
-#define SND_FEEDER_MULTIFORMAT 1
-#endif
diff --git a/sys/dev/sound/pcm/feeder_chain.c b/sys/dev/sound/pcm/feeder_chain.c
index 4ec50d810253..4fc846f77496 100644
--- a/sys/dev/sound/pcm/feeder_chain.c
+++ b/sys/dev/sound/pcm/feeder_chain.c
@@ -66,13 +66,7 @@ struct feeder_chain_desc {
#define FEEDER_CHAIN_FULLMULTI 4
#define FEEDER_CHAIN_LAST 5
-#if defined(SND_FEEDER_FULL_MULTIFORMAT)
#define FEEDER_CHAIN_DEFAULT FEEDER_CHAIN_FULLMULTI
-#elif defined(SND_FEEDER_MULTIFORMAT)
-#define FEEDER_CHAIN_DEFAULT FEEDER_CHAIN_MULTI
-#else
-#define FEEDER_CHAIN_DEFAULT FEEDER_CHAIN_LEAN
-#endif
/*
* List of preferred formats that might be required during
@@ -126,7 +120,7 @@ static uint32_t *feeder_chain_formats[FEEDER_CHAIN_LAST] = {
static int feeder_chain_mode = FEEDER_CHAIN_DEFAULT;
-#if defined(_KERNEL) && defined(SND_DEBUG) && defined(SND_FEEDER_FULL_MULTIFORMAT)
+#if defined(_KERNEL)
SYSCTL_INT(_hw_snd, OID_AUTO, feeder_chain_mode, CTLFLAG_RWTUN,
&feeder_chain_mode, 0,
"feeder chain mode "
@@ -589,12 +583,8 @@ feeder_chain(struct pcm_channel *c)
case FEEDER_CHAIN_LEAN:
case FEEDER_CHAIN_16:
case FEEDER_CHAIN_32:
-#if defined(SND_FEEDER_MULTIFORMAT) || defined(SND_FEEDER_FULL_MULTIFORMAT)
case FEEDER_CHAIN_MULTI:
-#endif
-#if defined(SND_FEEDER_FULL_MULTIFORMAT)
case FEEDER_CHAIN_FULLMULTI:
-#endif
break;
default:
feeder_chain_mode = FEEDER_CHAIN_DEFAULT;
diff --git a/sys/dev/sound/pcm/feeder_mixer.c b/sys/dev/sound/pcm/feeder_mixer.c
index 8c58e1c8ef33..be78b0cffb64 100644
--- a/sys/dev/sound/pcm/feeder_mixer.c
+++ b/sys/dev/sound/pcm/feeder_mixer.c
@@ -43,9 +43,6 @@
#include "snd_fxdiv_gen.h"
#endif
-#undef SND_FEEDER_MULTIFORMAT
-#define SND_FEEDER_MULTIFORMAT 1
-
struct feed_mixer_info {
uint32_t format;
uint32_t channels;
@@ -174,14 +171,6 @@ feed_mixer_rec(struct pcm_channel *c)
CHN_UNLOCK(ch);
continue;
}
-#ifdef SND_DEBUG
- if ((c->flags & CHN_F_DIRTY) && VCHAN_SYNC_REQUIRED(ch)) {
- if (vchan_sync(ch) != 0) {
- CHN_UNLOCK(ch);
- continue;
- }
- }
-#endif
bs = ch->bufsoft;
if (ch->flags & CHN_F_MMAP)
sndbuf_dispose(bs, NULL, sndbuf_getready(bs));
@@ -270,14 +259,6 @@ feed_mixer_feed(struct pcm_feeder *f, struct pcm_channel *c, uint8_t *b,
CHN_UNLOCK(ch);
continue;
}
-#ifdef SND_DEBUG
- if ((c->flags & CHN_F_DIRTY) && VCHAN_SYNC_REQUIRED(ch)) {
- if (vchan_sync(ch) != 0) {
- CHN_UNLOCK(ch);
- continue;
- }
- }
-#endif
if ((ch->flags & CHN_F_MMAP) && !(ch->flags & CHN_F_CLOSING))
sndbuf_acquire(ch->bufsoft, NULL,
sndbuf_getfree(ch->bufsoft));
diff --git a/sys/dev/sound/pcm/feeder_rate.c b/sys/dev/sound/pcm/feeder_rate.c
index aee164840c4a..173f7811f547 100644
--- a/sys/dev/sound/pcm/feeder_rate.c
+++ b/sys/dev/sound/pcm/feeder_rate.c
@@ -89,21 +89,6 @@
#define Z_RATE_DEFAULT 48000
-#define Z_RATE_MIN FEEDRATE_RATEMIN
-#define Z_RATE_MAX FEEDRATE_RATEMAX
-#define Z_ROUNDHZ FEEDRATE_ROUNDHZ
-#define Z_ROUNDHZ_MIN FEEDRATE_ROUNDHZ_MIN
-#define Z_ROUNDHZ_MAX FEEDRATE_ROUNDHZ_MAX
-
-#define Z_RATE_SRC FEEDRATE_SRC
-#define Z_RATE_DST FEEDRATE_DST
-#define Z_RATE_QUALITY FEEDRATE_QUALITY
-#define Z_RATE_CHANNELS FEEDRATE_CHANNELS
-
-#define Z_PARANOID 1
-
-#define Z_MULTIFORMAT 1
-
#ifdef _KERNEL
#undef Z_USE_ALPHADRIFT
#define Z_USE_ALPHADRIFT 1
@@ -151,9 +136,9 @@ struct z_info {
z_resampler_t z_resample;
};
-int feeder_rate_min = Z_RATE_MIN;
-int feeder_rate_max = Z_RATE_MAX;
-int feeder_rate_round = Z_ROUNDHZ;
+int feeder_rate_min = FEEDRATE_RATEMIN;
+int feeder_rate_max = FEEDRATE_RATEMAX;
+int feeder_rate_round = FEEDRATE_ROUNDHZ;
int feeder_rate_quality = Z_QUALITY_DEFAULT;
static int feeder_rate_polyphase_max = Z_POLYPHASE_MAX;
@@ -222,10 +207,10 @@ sysctl_hw_snd_feeder_rate_round(SYSCTL_HANDLER_ARGS)
if (err != 0 || req->newptr == NULL || val == feeder_rate_round)
return (err);
- if (val < Z_ROUNDHZ_MIN || val > Z_ROUNDHZ_MAX)
+ if (val < FEEDRATE_ROUNDHZ_MIN || val > FEEDRATE_ROUNDHZ_MAX)
return (EINVAL);
- feeder_rate_round = val - (val % Z_ROUNDHZ);
+ feeder_rate_round = val - (val % FEEDRATE_ROUNDHZ);
return (0);
}
@@ -622,15 +607,10 @@ z_feed_sinc_polyphase_##SIGN##BIT##ENDIAN(struct z_info *info, uint8_t *dst) \
Z_DECLARE_SINC(SIGN, BIT, ENDIAN) \
Z_DECLARE_SINC_POLYPHASE(SIGN, BIT, ENDIAN)
-#if BYTE_ORDER == LITTLE_ENDIAN || defined(SND_FEEDER_MULTIFORMAT)
Z_DECLARE(S, 16, LE)
Z_DECLARE(S, 32, LE)
-#endif
-#if BYTE_ORDER == BIG_ENDIAN || defined(SND_FEEDER_MULTIFORMAT)
Z_DECLARE(S, 16, BE)
Z_DECLARE(S, 32, BE)
-#endif
-#ifdef SND_FEEDER_MULTIFORMAT
Z_DECLARE(S, 8, NE)
Z_DECLARE(S, 24, LE)
Z_DECLARE(S, 24, BE)
@@ -643,7 +623,6 @@ Z_DECLARE(U, 24, BE)
Z_DECLARE(U, 32, BE)
Z_DECLARE(F, 32, LE)
Z_DECLARE(F, 32, BE)
-#endif
enum {
Z_RESAMPLER_ZOH,
@@ -672,15 +651,10 @@ static const struct {
uint32_t format;
z_resampler_t resampler[Z_RESAMPLER_LAST];
} z_resampler_tab[] = {
-#if BYTE_ORDER == LITTLE_ENDIAN || defined(SND_FEEDER_MULTIFORMAT)
Z_RESAMPLER_ENTRY(S, 16, LE),
Z_RESAMPLER_ENTRY(S, 32, LE),
-#endif
-#if BYTE_ORDER == BIG_ENDIAN || defined(SND_FEEDER_MULTIFORMAT)
Z_RESAMPLER_ENTRY(S, 16, BE),
Z_RESAMPLER_ENTRY(S, 32, BE),
-#endif
-#ifdef SND_FEEDER_MULTIFORMAT
Z_RESAMPLER_ENTRY(S, 8, NE),
Z_RESAMPLER_ENTRY(S, 24, LE),
Z_RESAMPLER_ENTRY(S, 24, BE),
@@ -693,7 +667,6 @@ static const struct {
Z_RESAMPLER_ENTRY(U, 32, BE),
Z_RESAMPLER_ENTRY(F, 32, LE),
Z_RESAMPLER_ENTRY(F, 32, BE),
-#endif
};
#define Z_RESAMPLER_TAB_SIZE \
@@ -728,7 +701,6 @@ z_resampler_reset(struct z_info *info)
info->quality = Z_QUALITY_MAX;
}
-#ifdef Z_PARANOID
static int32_t
z_resampler_sinc_len(struct z_info *info)
{
@@ -766,9 +738,6 @@ z_resampler_sinc_len(struct z_info *info)
return (len);
}
-#else
-#define z_resampler_sinc_len(i) (Z_IS_SINC(i) ? Z_SINC_LEN(i) : 1)
-#endif
#define Z_POLYPHASE_COEFF_SHIFT 0
@@ -1422,21 +1391,21 @@ z_resampler_set(struct pcm_feeder *f, int what, int32_t value)
info = f->data;
switch (what) {
- case Z_RATE_SRC:
+ case FEEDRATE_SRC:
if (value < feeder_rate_min || value > feeder_rate_max)
return (E2BIG);
if (value == info->rsrc)
return (0);
info->rsrc = value;
break;
- case Z_RATE_DST:
+ case FEEDRATE_DST:
if (value < feeder_rate_min || value > feeder_rate_max)
return (E2BIG);
if (value == info->rdst)
return (0);
info->rdst = value;
break;
- case Z_RATE_QUALITY:
+ case FEEDRATE_QUALITY:
if (value < Z_QUALITY_MIN || value > Z_QUALITY_MAX)
return (EINVAL);
if (value == info->quality)
@@ -1453,7 +1422,7 @@ z_resampler_set(struct pcm_feeder *f, int what, int32_t value)
return (0);
info->quality = oquality;
break;
- case Z_RATE_CHANNELS:
+ case FEEDRATE_CHANNELS:
if (value < SND_CHN_MIN || value > SND_CHN_MAX)
return (EINVAL);
if (value == info->channels)
@@ -1475,13 +1444,13 @@ z_resampler_get(struct pcm_feeder *f, int what)
info = f->data;
switch (what) {
- case Z_RATE_SRC:
+ case FEEDRATE_SRC:
return (info->rsrc);
- case Z_RATE_DST:
+ case FEEDRATE_DST:
return (info->rdst);
- case Z_RATE_QUALITY:
+ case FEEDRATE_QUALITY:
return (info->quality);
- case Z_RATE_CHANNELS:
+ case FEEDRATE_CHANNELS:
return (info->channels);
}
diff --git a/sys/dev/sound/pcm/feeder_volume.c b/sys/dev/sound/pcm/feeder_volume.c
index fc4ed1bbb0a5..5f40816b4065 100644
--- a/sys/dev/sound/pcm/feeder_volume.c
+++ b/sys/dev/sound/pcm/feeder_volume.c
@@ -74,15 +74,10 @@ feed_volume_##SIGN##BIT##ENDIAN(int *vol, int *matrix, \
} while (--count != 0); \
}
-#if BYTE_ORDER == LITTLE_ENDIAN || defined(SND_FEEDER_MULTIFORMAT)
FEEDVOLUME_DECLARE(S, 16, LE)
FEEDVOLUME_DECLARE(S, 32, LE)
-#endif
-#if BYTE_ORDER == BIG_ENDIAN || defined(SND_FEEDER_MULTIFORMAT)
FEEDVOLUME_DECLARE(S, 16, BE)
FEEDVOLUME_DECLARE(S, 32, BE)
-#endif
-#ifdef SND_FEEDER_MULTIFORMAT
FEEDVOLUME_DECLARE(S, 8, NE)
FEEDVOLUME_DECLARE(S, 24, LE)
FEEDVOLUME_DECLARE(S, 24, BE)
@@ -95,7 +90,6 @@ FEEDVOLUME_DECLARE(U, 24, BE)
FEEDVOLUME_DECLARE(U, 32, BE)
FEEDVOLUME_DECLARE(F, 32, LE)
FEEDVOLUME_DECLARE(F, 32, BE)
-#endif
struct feed_volume_info {
uint32_t bps, channels;
@@ -115,15 +109,10 @@ static const struct {
uint32_t format;
feed_volume_t apply;
} feed_volume_info_tab[] = {
-#if BYTE_ORDER == LITTLE_ENDIAN || defined(SND_FEEDER_MULTIFORMAT)
FEEDVOLUME_ENTRY(S, 16, LE),
FEEDVOLUME_ENTRY(S, 32, LE),
-#endif
-#if BYTE_ORDER == BIG_ENDIAN || defined(SND_FEEDER_MULTIFORMAT)
FEEDVOLUME_ENTRY(S, 16, BE),
FEEDVOLUME_ENTRY(S, 32, BE),
-#endif
-#ifdef SND_FEEDER_MULTIFORMAT
FEEDVOLUME_ENTRY(S, 8, NE),
FEEDVOLUME_ENTRY(S, 24, LE),
FEEDVOLUME_ENTRY(S, 24, BE),
@@ -136,7 +125,6 @@ static const struct {
FEEDVOLUME_ENTRY(U, 32, BE),
FEEDVOLUME_ENTRY(F, 32, LE),
FEEDVOLUME_ENTRY(F, 32, BE),
-#endif
};
#define FEEDVOLUME_TAB_SIZE ((int32_t) \
@@ -242,11 +230,14 @@ feed_volume_feed(struct pcm_feeder *f, struct pcm_channel *c, uint8_t *b,
{
int temp_vol[SND_CHN_T_VOL_MAX];
struct feed_volume_info *info;
+ struct snd_mixer *m;
+ struct snddev_info *d;
uint32_t j, align;
int i, *matrix;
uint8_t *dst;
const int16_t *vol;
const int8_t *muted;
+ bool master_muted = false;
/*
* Fetch filter data operation.
@@ -278,8 +269,14 @@ feed_volume_feed(struct pcm_feeder *f, struct pcm_channel *c, uint8_t *b,
return (FEEDER_FEED(f->source, c, b, count, source));
/* Check if any controls are muted. */
+ d = (c != NULL) ? c->parentsnddev : NULL;
+ m = (d != NULL && d->mixer_dev != NULL) ? d->mixer_dev->si_drv1 : NULL;
+
+ if (m != NULL)
+ master_muted = (mix_getmutedevs(m) & (1 << SND_VOL_C_MASTER));
+
for (j = 0; j != SND_CHN_T_VOL_MAX; j++)
- temp_vol[j] = muted[j] ? 0 : vol[j];
+ temp_vol[j] = (muted[j] || master_muted) ? 0 : vol[j];
dst = b;
align = info->bps * info->channels;
diff --git a/sys/dev/sound/pcm/matrix.h b/sys/dev/sound/pcm/matrix.h
index e2798c651536..ffac162f41a1 100644
--- a/sys/dev/sound/pcm/matrix.h
+++ b/sys/dev/sound/pcm/matrix.h
@@ -29,11 +29,6 @@
#ifndef _SND_MATRIX_H_
#define _SND_MATRIX_H_
-#undef SND_MULTICHANNEL
-#ifndef SND_OLDSTEREO
-#define SND_MULTICHANNEL 1
-#endif
-
/*
* XXX = unused, but part of the definition (will be used someday, maybe).
*/
@@ -176,18 +171,12 @@
#define SND_CHN_T_END SND_CHN_T_TBR
#define SND_CHN_T_STEP 1
#define SND_CHN_MIN 1
-
-#ifdef SND_MULTICHANNEL
#define SND_CHN_MAX 8
-#else
-#define SND_CHN_MAX 2
-#endif
/*
* Multichannel interleaved volume matrix. Each calculated value relative
* to master and 0db will be stored in each CLASS + 1 as long as
- * chn_setvolume_matrix() or the equivalent CHN_SETVOLUME() macros is
- * used (see channel.c).
+ * chn_setvolume_matrix() is used (see channel.c).
*/
#define SND_VOL_C_MASTER 0
#define SND_VOL_C_PCM 1
diff --git a/sys/dev/sound/pcm/mixer.c b/sys/dev/sound/pcm/mixer.c
index 55b61ccb4911..6ed2d0c3ce5c 100644
--- a/sys/dev/sound/pcm/mixer.c
+++ b/sys/dev/sound/pcm/mixer.c
@@ -1142,9 +1142,9 @@ mixer_ioctl_channel_proc:
center = (left + right) >> 1;
chn_setvolume_multi(c, SND_VOL_C_PCM, left, right, center);
} else if ((cmd & ~0xff) == MIXER_READ(0)) {
- *(int *)arg = CHN_GETVOLUME(c, SND_VOL_C_PCM, SND_CHN_T_FL);
+ *(int *)arg = chn_getvolume_matrix(c, SND_VOL_C_PCM, SND_CHN_T_FL);
*(int *)arg |=
- CHN_GETVOLUME(c, SND_VOL_C_PCM, SND_CHN_T_FR) << 8;
+ chn_getvolume_matrix(c, SND_VOL_C_PCM, SND_CHN_T_FR) << 8;
}
CHN_UNLOCK(c);
diff --git a/sys/dev/sound/sndstat.c b/sys/dev/sound/sndstat.c
index b0ac7f7d0824..c28a932c784e 100644
--- a/sys/dev/sound/sndstat.c
+++ b/sys/dev/sound/sndstat.c
@@ -487,9 +487,9 @@ sndstat_build_sound4_nvlist(struct snddev_info *d, nvlist_t **dip)
c->feedcount);
nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_XRUNS, c->xruns);
nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_LEFTVOL,
- CHN_GETVOLUME(c, SND_VOL_C_PCM, SND_CHN_T_FL));
+ chn_getvolume_matrix(c, SND_VOL_C_PCM, SND_CHN_T_FL));
nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_RIGHTVOL,
- CHN_GETVOLUME(c, SND_VOL_C_PCM, SND_CHN_T_FR));
+ chn_getvolume_matrix(c, SND_VOL_C_PCM, SND_CHN_T_FR));
nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_HWBUF_FORMAT,
c->bufhard->fmt);
nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_HWBUF_RATE,
diff --git a/sys/dev/sound/usb/uaudio.c b/sys/dev/sound/usb/uaudio.c
index 7f49bae9ce5e..65976ced8a75 100644
--- a/sys/dev/sound/usb/uaudio.c
+++ b/sys/dev/sound/usb/uaudio.c
@@ -155,7 +155,6 @@ SYSCTL_INT(_hw_usb_uaudio, OID_AUTO, debug, CTLFLAG_RWTUN,
#define MAKE_WORD(h,l) (((h) << 8) | (l))
#define BIT_TEST(bm,bno) (((bm)[(bno) / 8] >> (7 - ((bno) % 8))) & 1)
-#define UAUDIO_MAX_CHAN(x) (x)
#define MIX(sc) ((sc)->sc_mixer_node)
union uaudio_asid {
@@ -368,7 +367,6 @@ struct uaudio_softc_child {
};
struct uaudio_softc {
- struct sndcard_func sc_sndcard_func;
struct uaudio_chan sc_rec_chan[UAUDIO_MAX_CHILD];
struct uaudio_chan sc_play_chan[UAUDIO_MAX_CHILD];
struct umidi_chan sc_midi_chan;
@@ -556,9 +554,9 @@ static int umidi_open(struct usb_fifo *, int);
static int umidi_ioctl(struct usb_fifo *, u_long cmd, void *, int);
static void umidi_close(struct usb_fifo *, int);
static void umidi_init(device_t dev);
-static int umidi_probe(device_t dev);
+static int umidi_attach(device_t dev);
static int umidi_detach(device_t dev);
-static int uaudio_hid_probe(struct uaudio_softc *sc,
+static int uaudio_hid_attach(struct uaudio_softc *sc,
struct usb_attach_arg *uaa);
static void uaudio_hid_detach(struct uaudio_softc *sc);
@@ -1101,7 +1099,7 @@ uaudio_attach(device_t dev)
}
if (sc->sc_midi_chan.valid) {
- if (umidi_probe(dev)) {
+ if (umidi_attach(dev)) {
goto detach;
}
device_printf(dev, "MIDI sequencer.\n");
@@ -1113,8 +1111,6 @@ uaudio_attach(device_t dev)
/* attach the children */
- sc->sc_sndcard_func.func = SCF_PCM;
-
/*
* Only attach a PCM device if we have a playback, recording
* or mixer device present:
@@ -1131,14 +1127,12 @@ uaudio_attach(device_t dev)
DPRINTF("out of memory\n");
goto detach;
}
- device_set_ivars(sc->sc_child[i].pcm_device,
- &sc->sc_sndcard_func);
}
bus_attach_children(dev);
if (uaudio_handle_hid) {
- if (uaudio_hid_probe(sc, uaa) == 0) {
+ if (uaudio_hid_attach(sc, uaa) == 0) {
device_printf(dev, "HID volume keys found.\n");
} else {
device_printf(dev, "No HID volume keys found.\n");
@@ -1993,7 +1987,7 @@ uaudio_chan_fill_info_sub(struct uaudio_softc *sc, struct usb_device *udev,
uint16_t wFormat;
wFormat = UGETW(asid.v1->wFormatTag);
- bChannels = UAUDIO_MAX_CHAN(asf1d.v1->bNrChannels);
+ bChannels = asf1d.v1->bNrChannels;
bBitResolution = asf1d.v1->bSubFrameSize * 8;
if (asf1d.v1->bSamFreqType == 0) {
@@ -2074,8 +2068,7 @@ uaudio_chan_fill_info_sub(struct uaudio_softc *sc, struct usb_device *udev,
else
chan_alt->usb_cfg = uaudio_cfg_play;
- chan_alt->sample_size = (UAUDIO_MAX_CHAN(channels) *
- p_fmt->bPrecision) / 8;
+ chan_alt->sample_size = (channels * p_fmt->bPrecision) / 8;
chan_alt->channels = channels;
if (ep_dir == UE_DIR_IN &&
@@ -5805,9 +5798,7 @@ tr_setup:
}
}
- chan->curr_cable++;
- if (chan->curr_cable >= chan->max_emb_jack)
- chan->curr_cable = 0;
+	chan->curr_cable = (chan->curr_cable + 1) % chan->max_emb_jack;
if (chan->curr_cable == start_cable) {
if (tr_any == 0)
@@ -5987,7 +5978,7 @@ static struct usb_fifo_methods umidi_fifo_methods = {
};
static int
-umidi_probe(device_t dev)
+umidi_attach(device_t dev)
{
struct uaudio_softc *sc = device_get_softc(dev);
struct usb_attach_arg *uaa = device_get_ivars(dev);
@@ -6174,7 +6165,7 @@ tr_setup:
}
static int
-uaudio_hid_probe(struct uaudio_softc *sc,
+uaudio_hid_attach(struct uaudio_softc *sc,
struct usb_attach_arg *uaa)
{
void *d_ptr;
diff --git a/sys/dev/sound/usb/uaudio_pcm.c b/sys/dev/sound/usb/uaudio_pcm.c
index 0b3da9b20440..c24c111f983c 100644
--- a/sys/dev/sound/usb/uaudio_pcm.c
+++ b/sys/dev/sound/usb/uaudio_pcm.c
@@ -190,18 +190,7 @@ MIXER_DECLARE(ua_mixer);
static int
ua_probe(device_t dev)
{
- struct sndcard_func *func;
-
- /* the parent device has already been probed */
-
- func = device_get_ivars(dev);
-
- if ((func == NULL) ||
- (func->func != SCF_PCM)) {
- return (ENXIO);
- }
-
- return (BUS_PROBE_DEFAULT);
+ return (0);
}
static int
diff --git a/sys/dev/thunderbolt/nhi.c b/sys/dev/thunderbolt/nhi.c
index 30a72652535a..74cefbb50ca1 100644
--- a/sys/dev/thunderbolt/nhi.c
+++ b/sys/dev/thunderbolt/nhi.c
@@ -84,11 +84,6 @@ MALLOC_DEFINE(M_NHI, "nhi", "nhi driver memory");
#define NHI_DEBUG_LEVEL 0
#endif
-/* 0 = default, 1 = force-on, 2 = force-off */
-#ifndef NHI_FORCE_HCM
-#define NHI_FORCE_HCM 0
-#endif
-
void
nhi_get_tunables(struct nhi_softc *sc)
{
@@ -100,7 +95,6 @@ nhi_get_tunables(struct nhi_softc *sc)
/* Set local defaults */
sc->debug = NHI_DEBUG_LEVEL;
sc->max_ring_count = NHI_DEFAULT_NUM_RINGS;
- sc->force_hcm = NHI_FORCE_HCM;
/* Inherit setting from the upstream thunderbolt switch node */
val = TB_GET_DEBUG(sc->dev, &sc->debug);
@@ -128,8 +122,6 @@ nhi_get_tunables(struct nhi_softc *sc)
val = min(val, NHI_MAX_NUM_RINGS);
sc->max_ring_count = max(val, 1);
}
- if (TUNABLE_INT_FETCH("hw.nhi.force_hcm", &val) != 0)
- sc->force_hcm = val;
/* Grab instance variables */
bzero(oid, 80);
@@ -143,24 +135,10 @@ nhi_get_tunables(struct nhi_softc *sc)
val = min(val, NHI_MAX_NUM_RINGS);
sc->max_ring_count = max(val, 1);
}
- snprintf(tmpstr, sizeof(tmpstr), "dev, nhi.%d.force_hcm",
- device_get_unit(sc->dev));
- if (TUNABLE_INT_FETCH(tmpstr, &val) != 0)
- sc->force_hcm = val;
return;
}
-static void
-nhi_configure_caps(struct nhi_softc *sc)
-{
-
- if (NHI_IS_USB4(sc) || (sc->force_hcm == NHI_FORCE_HCM_ON))
- sc->caps |= NHI_CAP_HCM;
- if (sc->force_hcm == NHI_FORCE_HCM_OFF)
- sc->caps &= ~NHI_CAP_HCM;
-}
-
struct nhi_cmd_frame *
nhi_alloc_tx_frame(struct nhi_ring_pair *r)
{
@@ -268,16 +246,14 @@ nhi_attach(struct nhi_softc *sc)
mtx_init(&sc->nhi_mtx, "nhimtx", "NHI Control Mutex", MTX_DEF);
- nhi_configure_caps(sc);
-
/*
* Get the number of TX/RX paths. This sizes some of the register
* arrays during allocation and initialization. USB4 spec says that
- * the max is 21. Alpine Ridge appears to default to 12.
+ * the max is 21.
*/
val = GET_HOST_CAPS_PATHS(nhi_read_reg(sc, NHI_HOST_CAPS));
tb_debug(sc, DBG_INIT|DBG_NOISY, "Total Paths= %d\n", val);
- if ((val == 0) || (val > 21) || ((NHI_IS_AR(sc) && val != 12))) {
+ if (val == 0 || val > 21) {
tb_printf(sc, "WARN: unexpected number of paths: %d\n", val);
/* return (ENXIO); */
}
@@ -297,10 +273,6 @@ nhi_attach(struct nhi_softc *sc)
if (error == 0)
error = tbdev_add_interface(sc);
- if ((error == 0) && (NHI_USE_ICM(sc)))
- tb_printf(sc, "WARN: device uses an internal connection manager\n");
- if ((error == 0) && (NHI_USE_HCM(sc)))
- ;
error = hcm_attach(sc);
if (error == 0)
@@ -312,9 +284,7 @@ nhi_attach(struct nhi_softc *sc)
int
nhi_detach(struct nhi_softc *sc)
{
-
- if (NHI_USE_HCM(sc))
- hcm_detach(sc);
+ hcm_detach(sc);
if (sc->root_rsc != NULL)
tb_router_detach(sc->root_rsc);
@@ -706,16 +676,6 @@ nhi_init(struct nhi_softc *sc)
tb_debug(sc, DBG_INIT, "Setting interrupt auto-ACK, 0x%08x\n", val);
nhi_write_reg(sc, NHI_DMA_MISC, val);
- if (NHI_IS_AR(sc) || NHI_IS_TR(sc) || NHI_IS_ICL(sc))
- tb_printf(sc, "WARN: device uses an internal connection manager\n");
-
- /*
- * Populate the controller (local) UUID, necessary for cross-domain
- * communications.
- if (NHI_IS_ICL(sc))
- nhi_pci_get_uuid(sc);
- */
-
/*
* Attach the router to the root thunderbolt bridge now that the DMA
* channel is configured and ready.
@@ -1163,9 +1123,6 @@ nhi_setup_sysctl(struct nhi_softc *sc)
SYSCTL_ADD_U16(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"max_rings", CTLFLAG_RD, &sc->max_ring_count, 0,
"Max number of rings available");
- SYSCTL_ADD_U8(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
- "force_hcm", CTLFLAG_RD, &sc->force_hcm, 0,
- "Force on/off the function of the host connection manager");
return (0);
}
diff --git a/sys/dev/thunderbolt/nhi_pci.c b/sys/dev/thunderbolt/nhi_pci.c
index 14cae7427448..777f8dd5352e 100644
--- a/sys/dev/thunderbolt/nhi_pci.c
+++ b/sys/dev/thunderbolt/nhi_pci.c
@@ -68,7 +68,6 @@ static int nhi_pci_resume(device_t);
static void nhi_pci_free(struct nhi_softc *);
static int nhi_pci_allocate_interrupts(struct nhi_softc *);
static void nhi_pci_free_resources(struct nhi_softc *);
-static int nhi_pci_icl_poweron(struct nhi_softc *);
static device_method_t nhi_methods[] = {
DEVMETHOD(device_probe, nhi_pci_probe),
@@ -89,67 +88,18 @@ static driver_t nhi_pci_driver = {
sizeof(struct nhi_softc)
};
-struct nhi_ident {
- uint16_t vendor;
- uint16_t device;
- uint16_t subvendor;
- uint16_t subdevice;
- uint32_t flags;
- const char *desc;
-} nhi_identifiers[] = {
- { VENDOR_INTEL, DEVICE_AR_2C_NHI, 0xffff, 0xffff, NHI_TYPE_AR,
- "Thunderbolt 3 NHI (Alpine Ridge 2C)" },
- { VENDOR_INTEL, DEVICE_AR_DP_B_NHI, 0xffff, 0xffff, NHI_TYPE_AR,
- "Thunderbolt 3 NHI (Alpine Ridge 4C Rev B)" },
- { VENDOR_INTEL, DEVICE_AR_DP_C_NHI, 0xffff, 0xffff, NHI_TYPE_AR,
- "Thunderbolt 3 NHI (Alpine Ridge 4C Rev C)" },
- { VENDOR_INTEL, DEVICE_AR_LP_NHI, 0xffff, 0xffff, NHI_TYPE_AR,
- "Thunderbolt 3 NHI (Alpine Ridge LP 2C)" },
- { VENDOR_INTEL, DEVICE_ICL_NHI_0, 0xffff, 0xffff, NHI_TYPE_ICL,
- "Thunderbolt 3 NHI Port 0 (IceLake)" },
- { VENDOR_INTEL, DEVICE_ICL_NHI_1, 0xffff, 0xffff, NHI_TYPE_ICL,
- "Thunderbolt 3 NHI Port 1 (IceLake)" },
- { VENDOR_AMD, DEVICE_PINK_SARDINE_0, 0xffff, 0xffff, NHI_TYPE_USB4,
- "USB4 NHI Port 0 (Pink Sardine)" },
- { VENDOR_AMD, DEVICE_PINK_SARDINE_1, 0xffff, 0xffff, NHI_TYPE_USB4,
- "USB4 NHI Port 1 (Pink Sardine)" },
- { 0, 0, 0, 0, 0, NULL }
-};
-
DRIVER_MODULE_ORDERED(nhi, pci, nhi_pci_driver, NULL, NULL,
SI_ORDER_ANY);
-static struct nhi_ident *
-nhi_find_ident(device_t dev)
-{
- struct nhi_ident *n;
-
- for (n = nhi_identifiers; n->vendor != 0; n++) {
- if (n->vendor != pci_get_vendor(dev))
- continue;
- if (n->device != pci_get_device(dev))
- continue;
- if ((n->subvendor != 0xffff) &&
- (n->subvendor != pci_get_subvendor(dev)))
- continue;
- if ((n->subdevice != 0xffff) &&
- (n->subdevice != pci_get_subdevice(dev)))
- continue;
- return (n);
- }
-
- return (NULL);
-}
-
static int
nhi_pci_probe(device_t dev)
{
- struct nhi_ident *n;
-
if (resource_disabled("tb", 0))
return (ENXIO);
- if ((n = nhi_find_ident(dev)) != NULL) {
- device_set_desc(dev, n->desc);
+	if (pci_get_class(dev) == PCIC_SERIALBUS &&
+	    pci_get_subclass(dev) == PCIS_SERIALBUS_USB &&
+	    pci_get_progif(dev) == PCIP_SERIALBUS_USB_USB4) {
+ device_set_desc(dev, "Generic USB4 NHI");
return (BUS_PROBE_DEFAULT);
}
return (ENXIO);
@@ -161,14 +111,12 @@ nhi_pci_attach(device_t dev)
devclass_t dc;
bus_dma_template_t t;
struct nhi_softc *sc;
- struct nhi_ident *n;
int error = 0;
sc = device_get_softc(dev);
bzero(sc, sizeof(*sc));
sc->dev = dev;
- n = nhi_find_ident(dev);
- sc->hwflags = n->flags;
+ sc->hwflags = NHI_TYPE_USB4;
nhi_get_tunables(sc);
tb_debug(sc, DBG_INIT|DBG_FULL, "busmaster status was %s\n",
@@ -188,12 +136,6 @@ nhi_pci_attach(device_t dev)
tb_printf(sc, "Upstream Facing Port is %s\n",
device_get_nameunit(sc->ufp));
- if (NHI_IS_ICL(sc)) {
- if ((error = nhi_pci_icl_poweron(sc)) != 0)
- return (error);
- }
-
-
/* Allocate BAR0 DMA registers */
sc->regs_rid = PCIR_BAR(0);
if ((sc->regs_resource = bus_alloc_resource_any(dev,
@@ -476,56 +418,3 @@ nhi_pci_disable_interrupts(struct nhi_softc *sc)
nhi_read_reg(sc, NHI_ISR0);
nhi_read_reg(sc, NHI_ISR1);
}
-
-/*
- * Icelake controllers need to be notified of power-on
- */
-static int
-nhi_pci_icl_poweron(struct nhi_softc *sc)
-{
- device_t dev;
- uint32_t val;
- int i, error = 0;
-
- dev = sc->dev;
- val = pci_read_config(dev, ICL_VSCAP_9, 4);
- tb_debug(sc, DBG_INIT, "icl_poweron val= 0x%x\n", val);
- if (val & ICL_VSCAP9_FWREADY)
- return (0);
-
- val = pci_read_config(dev, ICL_VSCAP_22, 4);
- val |= ICL_VSCAP22_FORCEPWR;
- tb_debug(sc, DBG_INIT|DBG_FULL, "icl_poweron writing 0x%x\n", val);
- pci_write_config(dev, ICL_VSCAP_22, val, 4);
-
- error = ETIMEDOUT;
- for (i = 0; i < 15; i++) {
- DELAY(1000000);
- val = pci_read_config(dev, ICL_VSCAP_9, 4);
- if (val & ICL_VSCAP9_FWREADY) {
- error = 0;
- break;
- }
- }
-
- return (error);
-}
-
-/*
- * Icelake and Alderlake controllers store their UUID in PCI config space
- */
-int
-nhi_pci_get_uuid(struct nhi_softc *sc)
-{
- device_t dev;
- uint32_t val[4];
-
- dev = sc->dev;
- val[0] = pci_read_config(dev, ICL_VSCAP_10, 4);
- val[1] = pci_read_config(dev, ICL_VSCAP_11, 4);
- val[2] = 0xffffffff;
- val[3] = 0xffffffff;
-
- bcopy(val, &sc->uuid, 16);
- return (0);
-}
diff --git a/sys/dev/thunderbolt/nhi_var.h b/sys/dev/thunderbolt/nhi_var.h
index e79ecc954c1f..e22c0f4a2bae 100644
--- a/sys/dev/thunderbolt/nhi_var.h
+++ b/sys/dev/thunderbolt/nhi_var.h
@@ -142,19 +142,9 @@ struct nhi_softc {
u_int debug;
u_int hwflags;
#define NHI_TYPE_UNKNOWN 0x00
-#define NHI_TYPE_AR 0x01 /* Alpine Ridge */
-#define NHI_TYPE_TR 0x02 /* Titan Ridge */
-#define NHI_TYPE_ICL 0x03 /* IceLake */
-#define NHI_TYPE_MR 0x04 /* Maple Ridge */
-#define NHI_TYPE_ADL 0x05 /* AlderLake */
#define NHI_TYPE_USB4 0x0f
#define NHI_TYPE_MASK 0x0f
#define NHI_MBOX_BUSY 0x10
- u_int caps;
-#define NHI_CAP_ICM 0x01
-#define NHI_CAP_HCM 0x02
-#define NHI_USE_ICM(sc) ((sc)->caps & NHI_CAP_ICM)
-#define NHI_USE_HCM(sc) ((sc)->caps & NHI_CAP_HCM)
struct hcm_softc *hcm;
struct router_softc *root_rsc;
@@ -194,11 +184,6 @@ struct nhi_softc {
struct intr_config_hook ich;
- uint8_t force_hcm;
-#define NHI_FORCE_HCM_DEFAULT 0x00
-#define NHI_FORCE_HCM_ON 0x01
-#define NHI_FORCE_HCM_OFF 0x02
-
uint8_t uuid[16];
uint8_t lc_uuid[16];
};
@@ -209,9 +194,6 @@ struct nhi_dispatch {
void *context;
};
-#define NHI_IS_AR(sc) (((sc)->hwflags & NHI_TYPE_MASK) == NHI_TYPE_AR)
-#define NHI_IS_TR(sc) (((sc)->hwflags & NHI_TYPE_MASK) == NHI_TYPE_TR)
-#define NHI_IS_ICL(sc) (((sc)->hwflags & NHI_TYPE_MASK) == NHI_TYPE_ICL)
#define NHI_IS_USB4(sc) (((sc)->hwflags & NHI_TYPE_MASK) == NHI_TYPE_USB4)
int nhi_pci_configure_interrupts(struct nhi_softc *sc);
diff --git a/sys/dev/thunderbolt/tb_pcib.c b/sys/dev/thunderbolt/tb_pcib.c
index ffb85ebec9ae..b30de5a7493c 100644
--- a/sys/dev/thunderbolt/tb_pcib.c
+++ b/sys/dev/thunderbolt/tb_pcib.c
@@ -90,18 +90,6 @@ struct tb_pcib_ident {
uint32_t flags; /* This follows the tb_softc flags */
const char *desc;
} tb_pcib_identifiers[] = {
- { VENDOR_INTEL, TB_DEV_AR_2C, 0xffff, 0xffff, TB_GEN_TB3|TB_HWIF_AR,
- "Thunderbolt 3 PCI-PCI Bridge (Alpine Ridge 2C)" },
- { VENDOR_INTEL, TB_DEV_AR_LP, 0xffff, 0xffff, TB_GEN_TB3|TB_HWIF_AR,
- "Thunderbolt 3 PCI-PCI Bridge (Alpine Ridge LP)" },
- { VENDOR_INTEL, TB_DEV_AR_C_4C, 0xffff, 0xffff, TB_GEN_TB3|TB_HWIF_AR,
- "Thunderbolt 3 PCI-PCI Bridge (Alpine Ridge C 4C)" },
- { VENDOR_INTEL, TB_DEV_AR_C_2C, 0xffff, 0xffff, TB_GEN_TB3|TB_HWIF_AR,
- "Thunderbolt 3 PCI-PCI Bridge C (Alpine Ridge C 2C)" },
- { VENDOR_INTEL, TB_DEV_ICL_0, 0xffff, 0xffff, TB_GEN_TB3|TB_HWIF_ICL,
- "Thunderbolt 3 PCI-PCI Bridge (IceLake)" },
- { VENDOR_INTEL, TB_DEV_ICL_1, 0xffff, 0xffff, TB_GEN_TB3|TB_HWIF_ICL,
- "Thunderbolt 3 PCI-PCI Bridge (IceLake)" },
{ 0, 0, 0, 0, 0, NULL }
};
diff --git a/sys/dev/tpm/tpm20.c b/sys/dev/tpm/tpm20.c
index c521ca9dda9d..48f33708917d 100644
--- a/sys/dev/tpm/tpm20.c
+++ b/sys/dev/tpm/tpm20.c
@@ -41,7 +41,6 @@
MALLOC_DEFINE(M_TPM20, "tpm_buffer", "buffer for tpm 2.0 driver");
-static void tpm20_discard_buffer(void *arg);
#if defined TPM_HARVEST || defined RANDOM_ENABLE_TPM
static void tpm20_harvest(void *arg, int unused);
#endif
@@ -68,27 +67,23 @@ int
tpm20_read(struct cdev *dev, struct uio *uio, int flags)
{
struct tpm_sc *sc;
+ struct tpm_priv *priv;
size_t bytes_to_transfer;
size_t offset;
int result = 0;
sc = (struct tpm_sc *)dev->si_drv1;
+ devfs_get_cdevpriv((void **)&priv);
- callout_stop(&sc->discard_buffer_callout);
sx_xlock(&sc->dev_lock);
- if (sc->owner_tid != uio->uio_td->td_tid) {
- sx_xunlock(&sc->dev_lock);
- return (EPERM);
- }
-
- bytes_to_transfer = MIN(sc->pending_data_length, uio->uio_resid);
- offset = sc->total_length - sc->pending_data_length;
+ offset = priv->offset;
+ bytes_to_transfer = MIN(priv->len, uio->uio_resid);
if (bytes_to_transfer > 0) {
- result = uiomove((caddr_t) sc->buf + offset, bytes_to_transfer, uio);
- sc->pending_data_length -= bytes_to_transfer;
- cv_signal(&sc->buf_cv);
+ result = uiomove((caddr_t) priv->buf + offset, bytes_to_transfer, uio);
+ priv->offset += bytes_to_transfer;
+ priv->len -= bytes_to_transfer;
} else {
- result = ETIMEDOUT;
+ result = 0;
}
sx_xunlock(&sc->dev_lock);
@@ -100,10 +95,12 @@ int
tpm20_write(struct cdev *dev, struct uio *uio, int flags)
{
struct tpm_sc *sc;
+ struct tpm_priv *priv;
size_t byte_count;
int result = 0;
sc = (struct tpm_sc *)dev->si_drv1;
+ devfs_get_cdevpriv((void **)&priv);
byte_count = uio->uio_resid;
if (byte_count < TPM_HEADER_SIZE) {
@@ -120,52 +117,42 @@ tpm20_write(struct cdev *dev, struct uio *uio, int flags)
sx_xlock(&sc->dev_lock);
- while (sc->pending_data_length != 0)
- cv_wait(&sc->buf_cv, &sc->dev_lock);
-
- result = uiomove(sc->buf, byte_count, uio);
+ result = uiomove(priv->buf, byte_count, uio);
if (result != 0) {
sx_xunlock(&sc->dev_lock);
return (result);
}
- result = TPM_TRANSMIT(sc->dev, byte_count);
-
- if (result == 0) {
- callout_reset(&sc->discard_buffer_callout,
- TPM_READ_TIMEOUT / tick, tpm20_discard_buffer, sc);
- sc->owner_tid = uio->uio_td->td_tid;
- }
+ result = TPM_TRANSMIT(sc->dev, priv, byte_count);
sx_xunlock(&sc->dev_lock);
return (result);
}
-static void
-tpm20_discard_buffer(void *arg)
+static struct tpm_priv *
+tpm20_priv_alloc(void)
{
- struct tpm_sc *sc;
-
- sc = (struct tpm_sc *)arg;
- if (callout_pending(&sc->discard_buffer_callout))
- return;
+ struct tpm_priv *priv;
- sx_xlock(&sc->dev_lock);
-
- memset(sc->buf, 0, TPM_BUFSIZE);
- sc->pending_data_length = 0;
- sc->total_length = 0;
+	priv = malloc(sizeof(*priv), M_TPM20, M_WAITOK | M_ZERO);
+	priv->buf = malloc(TPM_BUFSIZE, M_TPM20, M_WAITOK | M_ZERO);
+	return (priv);
+}
- cv_signal(&sc->buf_cv);
- sx_xunlock(&sc->dev_lock);
+static void
+tpm20_priv_dtor(void *data)
+{
+ struct tpm_priv *priv = data;
- device_printf(sc->dev,
- "User failed to read buffer in time\n");
+	free(priv->buf, M_TPM20);
+	free(priv, M_TPM20);
}
int
tpm20_open(struct cdev *dev, int flag, int mode, struct thread *td)
{
+ struct tpm_priv *priv;
+
+ priv = tpm20_priv_alloc();
+ devfs_set_cdevpriv(priv, tpm20_priv_dtor);
return (0);
}
@@ -198,10 +185,7 @@ tpm20_init(struct tpm_sc *sc)
struct make_dev_args args;
int result;
- cv_init(&sc->buf_cv, "TPM buffer cv");
- callout_init(&sc->discard_buffer_callout, 1);
- sc->pending_data_length = 0;
- sc->total_length = 0;
+ sc->internal_priv = tpm20_priv_alloc();
make_dev_args_init(&args);
args.mda_devsw = &tpm20_cdevsw;
@@ -234,11 +218,8 @@ tpm20_release(struct tpm_sc *sc)
random_source_deregister(&random_tpm);
#endif
- if (sc->buf != NULL)
- free(sc->buf, M_TPM20);
-
+ tpm20_priv_dtor(sc->internal_priv);
sx_destroy(&sc->dev_lock);
- cv_destroy(&sc->buf_cv);
if (sc->sc_cdev != NULL)
destroy_dev(sc->sc_cdev);
}
@@ -286,6 +267,7 @@ static void
tpm20_harvest(void *arg, int unused)
{
struct tpm_sc *sc;
+ struct tpm_priv *priv;
unsigned char entropy[TPM_HARVEST_SIZE];
uint16_t entropy_size;
int result;
@@ -298,26 +280,22 @@ tpm20_harvest(void *arg, int unused)
sc = arg;
sx_xlock(&sc->dev_lock);
- while (sc->pending_data_length != 0)
- cv_wait(&sc->buf_cv, &sc->dev_lock);
- memcpy(sc->buf, cmd, sizeof(cmd));
- result = TPM_TRANSMIT(sc->dev, sizeof(cmd));
+ priv = sc->internal_priv;
+ memcpy(priv->buf, cmd, sizeof(cmd));
+
+ result = TPM_TRANSMIT(sc->dev, priv, sizeof(cmd));
if (result != 0) {
sx_xunlock(&sc->dev_lock);
return;
}
- /* Ignore response size */
- sc->pending_data_length = 0;
- sc->total_length = 0;
-
/* The number of random bytes we got is placed right after the header */
- entropy_size = (uint16_t) sc->buf[TPM_HEADER_SIZE + 1];
+ entropy_size = (uint16_t) priv->buf[TPM_HEADER_SIZE + 1];
if (entropy_size > 0) {
entropy_size = MIN(entropy_size, TPM_HARVEST_SIZE);
memcpy(entropy,
- sc->buf + TPM_HEADER_SIZE + sizeof(uint16_t),
+ priv->buf + TPM_HEADER_SIZE + sizeof(uint16_t),
entropy_size);
}
@@ -334,6 +312,7 @@ static int
tpm20_restart(device_t dev, bool clear)
{
struct tpm_sc *sc;
+ struct tpm_priv *priv;
uint8_t startup_cmd[] = {
0x80, 0x01, /* TPM_ST_NO_SESSIONS tag*/
0x00, 0x00, 0x00, 0x0C, /* cmd length */
@@ -349,18 +328,16 @@ tpm20_restart(device_t dev, bool clear)
if (clear)
startup_cmd[11] = 0; /* TPM_SU_CLEAR */
- if (sc == NULL || sc->buf == NULL)
+ if (sc == NULL)
return (0);
sx_xlock(&sc->dev_lock);
- MPASS(sc->pending_data_length == 0);
- memcpy(sc->buf, startup_cmd, sizeof(startup_cmd));
+ priv = sc->internal_priv;
+ memcpy(priv->buf, startup_cmd, sizeof(startup_cmd));
/* XXX Ignoring both TPM_TRANSMIT return and tpm's response */
- TPM_TRANSMIT(sc->dev, sizeof(startup_cmd));
- sc->pending_data_length = 0;
- sc->total_length = 0;
+ TPM_TRANSMIT(sc->dev, priv, sizeof(startup_cmd));
sx_xunlock(&sc->dev_lock);
@@ -371,6 +348,7 @@ static int
tpm20_save_state(device_t dev, bool suspend)
{
struct tpm_sc *sc;
+ struct tpm_priv *priv;
uint8_t save_cmd[] = {
0x80, 0x01, /* TPM_ST_NO_SESSIONS tag*/
0x00, 0x00, 0x00, 0x0C, /* cmd length */
@@ -386,18 +364,16 @@ tpm20_save_state(device_t dev, bool suspend)
if (suspend)
save_cmd[11] = 1; /* TPM_SU_STATE */
- if (sc == NULL || sc->buf == NULL)
+ if (sc == NULL)
return (0);
sx_xlock(&sc->dev_lock);
- MPASS(sc->pending_data_length == 0);
- memcpy(sc->buf, save_cmd, sizeof(save_cmd));
+ priv = sc->internal_priv;
+ memcpy(priv->buf, save_cmd, sizeof(save_cmd));
/* XXX Ignoring both TPM_TRANSMIT return and tpm's response */
- TPM_TRANSMIT(sc->dev, sizeof(save_cmd));
- sc->pending_data_length = 0;
- sc->total_length = 0;
+ TPM_TRANSMIT(sc->dev, priv, sizeof(save_cmd));
sx_xunlock(&sc->dev_lock);
diff --git a/sys/dev/tpm/tpm20.h b/sys/dev/tpm/tpm20.h
index 96b2920283b6..b63bb9a1436e 100644
--- a/sys/dev/tpm/tpm20.h
+++ b/sys/dev/tpm/tpm20.h
@@ -105,6 +105,12 @@
MALLOC_DECLARE(M_TPM20);
+struct tpm_priv {
+ uint8_t buf[TPM_BUFSIZE];
+ size_t offset;
+ size_t len;
+};
+
struct tpm_sc {
device_t dev;
@@ -116,18 +122,13 @@ struct tpm_sc {
struct cdev *sc_cdev;
struct sx dev_lock;
- struct cv buf_cv;
void *intr_cookie;
int intr_type; /* Current event type */
bool interrupts;
- uint8_t *buf;
- size_t pending_data_length;
- size_t total_length;
- lwpid_t owner_tid;
+ struct tpm_priv *internal_priv;
- struct callout discard_buffer_callout;
#if defined TPM_HARVEST || defined RANDOM_ENABLE_TPM
struct timeout_task harvest_task;
#endif
diff --git a/sys/dev/tpm/tpm_crb.c b/sys/dev/tpm/tpm_crb.c
index 18414a6c799b..ac093c3857ba 100644
--- a/sys/dev/tpm/tpm_crb.c
+++ b/sys/dev/tpm/tpm_crb.c
@@ -127,7 +127,7 @@ struct tpmcrb_sc {
size_t rsp_buf_size;
};
-int tpmcrb_transmit(device_t dev, size_t size);
+int tpmcrb_transmit(device_t dev, struct tpm_priv *priv, size_t size);
static int tpmcrb_acpi_probe(device_t dev);
static int tpmcrb_attach(device_t dev);
@@ -257,7 +257,6 @@ tpmcrb_attach(device_t dev)
sc->dev = dev;
sx_init(&sc->dev_lock, "TPM driver lock");
- sc->buf = malloc(TPM_BUFSIZE, M_TPM20, M_WAITOK);
sc->mem_rid = 0;
sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
@@ -480,7 +479,7 @@ tpmcrb_state_ready(struct tpmcrb_sc *crb_sc, bool wait)
}
int
-tpmcrb_transmit(device_t dev, size_t length)
+tpmcrb_transmit(device_t dev, struct tpm_priv *priv, size_t length)
{
struct tpmcrb_sc *crb_sc;
struct tpm_sc *sc;
@@ -531,12 +530,12 @@ tpmcrb_transmit(device_t dev, size_t length)
* Calculate timeout for current command.
* Command code is passed in bytes 6-10.
*/
- curr_cmd = be32toh(*(uint32_t *) (&sc->buf[6]));
+ curr_cmd = be32toh(*(uint32_t *) (&priv->buf[6]));
timeout = tpm20_get_timeout(curr_cmd);
/* Send command and tell device to process it. */
bus_write_region_stream_1(sc->mem_res, crb_sc->cmd_off,
- sc->buf, length);
+ priv->buf, length);
TPM_WRITE_BARRIER(dev, crb_sc->cmd_off, length);
TPM_WRITE_4(dev, TPM_CRB_CTRL_START, TPM_CRB_CTRL_START_CMD);
@@ -559,8 +558,8 @@ tpmcrb_transmit(device_t dev, size_t length)
/* Read response header. Length is passed in bytes 2 - 6. */
bus_read_region_stream_1(sc->mem_res, crb_sc->rsp_off,
- sc->buf, TPM_HEADER_SIZE);
- bytes_available = be32toh(*(uint32_t *) (&sc->buf[2]));
+ priv->buf, TPM_HEADER_SIZE);
+ bytes_available = be32toh(*(uint32_t *) (&priv->buf[2]));
if (bytes_available > TPM_BUFSIZE || bytes_available < TPM_HEADER_SIZE) {
device_printf(dev,
@@ -570,7 +569,7 @@ tpmcrb_transmit(device_t dev, size_t length)
}
bus_read_region_stream_1(sc->mem_res, crb_sc->rsp_off + TPM_HEADER_SIZE,
- &sc->buf[TPM_HEADER_SIZE], bytes_available - TPM_HEADER_SIZE);
+ &priv->buf[TPM_HEADER_SIZE], bytes_available - TPM_HEADER_SIZE);
/*
* No need to wait for the transition to idle on the way out, we can
@@ -583,8 +582,8 @@ tpmcrb_transmit(device_t dev, size_t length)
}
tpmcrb_relinquish_locality(sc);
- sc->pending_data_length = bytes_available;
- sc->total_length = bytes_available;
+ priv->offset = 0;
+ priv->len = bytes_available;
return (0);
}
diff --git a/sys/dev/tpm/tpm_if.m b/sys/dev/tpm/tpm_if.m
index b0149ba163a6..3b0dc9e3892f 100644
--- a/sys/dev/tpm/tpm_if.m
+++ b/sys/dev/tpm/tpm_if.m
@@ -28,6 +28,10 @@
#include <sys/bus.h>
#include <dev/tpm/tpm20.h>
+HEADER {
+ struct tpm_priv;
+};
+
INTERFACE tpm;
#
@@ -35,6 +39,7 @@ INTERFACE tpm;
#
METHOD int transmit {
device_t dev;
+ struct tpm_priv *priv;
size_t length;
};
diff --git a/sys/dev/tpm/tpm_tis_core.c b/sys/dev/tpm/tpm_tis_core.c
index 34ecfcc283b1..f49a1f982e82 100644
--- a/sys/dev/tpm/tpm_tis_core.c
+++ b/sys/dev/tpm/tpm_tis_core.c
@@ -73,7 +73,7 @@
#define TPM_STS_BURST_MASK 0xFFFF00
#define TPM_STS_BURST_OFFSET 0x8
-static int tpmtis_transmit(device_t dev, size_t length);
+static int tpmtis_transmit(device_t dev, struct tpm_priv *priv, size_t length);
static int tpmtis_detach(device_t dev);
@@ -104,7 +104,6 @@ tpmtis_attach(device_t dev)
sc->intr_type = -1;
sx_init(&sc->dev_lock, "TPM driver lock");
- sc->buf = malloc(TPM_BUFSIZE, M_TPM20, M_WAITOK);
resource_int_value("tpm", device_get_unit(dev), "use_polling", &poll);
if (poll != 0) {
@@ -164,6 +163,7 @@ tpmtis_detach(device_t dev)
static void
tpmtis_test_intr(struct tpm_sc *sc)
{
+ struct tpm_priv *priv;
uint8_t cmd[] = {
0x80, 0x01, /* TPM_ST_NO_SESSIONS tag*/
0x00, 0x00, 0x00, 0x0c, /* cmd length */
@@ -172,9 +172,9 @@ tpmtis_test_intr(struct tpm_sc *sc)
};
sx_xlock(&sc->dev_lock);
- memcpy(sc->buf, cmd, sizeof(cmd));
- tpmtis_transmit(sc->dev, sizeof(cmd));
- sc->pending_data_length = 0;
+ priv = sc->internal_priv;
+ memcpy(priv->buf, cmd, sizeof(cmd));
+ tpmtis_transmit(sc->dev, priv, sizeof(cmd));
sx_xunlock(&sc->dev_lock);
}
@@ -384,7 +384,7 @@ tpmtis_go_ready(struct tpm_sc *sc)
}
static int
-tpmtis_transmit(device_t dev, size_t length)
+tpmtis_transmit(device_t dev, struct tpm_priv *priv, size_t length)
{
struct tpm_sc *sc;
size_t bytes_available;
@@ -404,7 +404,7 @@ tpmtis_transmit(device_t dev, size_t length)
"Failed to switch to ready state\n");
return (EIO);
}
- if (!tpmtis_write_bytes(sc, length, sc->buf)) {
+ if (!tpmtis_write_bytes(sc, length, priv->buf)) {
device_printf(dev,
"Failed to write cmd to device\n");
return (EIO);
@@ -428,7 +428,7 @@ tpmtis_transmit(device_t dev, size_t length)
* Calculate timeout for current command.
* Command code is passed in bytes 6-10.
*/
- curr_cmd = be32toh(*(uint32_t *) (&sc->buf[6]));
+ curr_cmd = be32toh(*(uint32_t *) (&priv->buf[6]));
timeout = tpm20_get_timeout(curr_cmd);
TPM_WRITE_4(dev, TPM_STS, TPM_STS_CMD_START);
@@ -455,12 +455,12 @@ tpmtis_transmit(device_t dev, size_t length)
return (EIO);
}
/* Read response header. Length is passed in bytes 2 - 6. */
- if(!tpmtis_read_bytes(sc, TPM_HEADER_SIZE, sc->buf)) {
+ if (!tpmtis_read_bytes(sc, TPM_HEADER_SIZE, priv->buf)) {
device_printf(dev,
"Failed to read response header\n");
return (EIO);
}
- bytes_available = be32toh(*(uint32_t *) (&sc->buf[2]));
+ bytes_available = be32toh(*(uint32_t *) (&priv->buf[2]));
if (bytes_available > TPM_BUFSIZE || bytes_available < TPM_HEADER_SIZE) {
device_printf(dev,
@@ -468,15 +468,15 @@ tpmtis_transmit(device_t dev, size_t length)
bytes_available);
return (EIO);
}
- if(!tpmtis_read_bytes(sc, bytes_available - TPM_HEADER_SIZE,
- &sc->buf[TPM_HEADER_SIZE])) {
+ if (!tpmtis_read_bytes(sc, bytes_available - TPM_HEADER_SIZE,
+ &priv->buf[TPM_HEADER_SIZE])) {
device_printf(dev,
"Failed to read response\n");
return (EIO);
}
tpmtis_relinquish_locality(sc);
- sc->pending_data_length = bytes_available;
- sc->total_length = bytes_available;
+ priv->offset = 0;
+ priv->len = bytes_available;
return (0);
}
diff --git a/sys/dev/uart/uart_bus_pci.c b/sys/dev/uart/uart_bus_pci.c
index 22af8ee8663c..97c5ff84d251 100644
--- a/sys/dev/uart/uart_bus_pci.c
+++ b/sys/dev/uart/uart_bus_pci.c
@@ -280,33 +280,43 @@ uart_pci_probe(device_t dev)
{
struct uart_softc *sc;
const struct pci_id *id;
- struct pci_id cid = {
- .regshft = 0,
- .rclk = 0,
- .rid = 0x10 | PCI_NO_MSI,
- .desc = "Generic SimpleComm PCI device",
- };
- int result;
sc = device_get_softc(dev);
id = uart_pci_match(dev, pci_ns8250_ids);
if (id != NULL) {
sc->sc_class = &uart_ns8250_class;
- goto match;
+ return (BUS_PROBE_SPECIFIC);
}
if (pci_get_class(dev) == PCIC_SIMPLECOMM &&
pci_get_subclass(dev) == PCIS_SIMPLECOMM_UART &&
- pci_get_progif(dev) < PCIP_SIMPLECOMM_UART_16550A) {
- /* XXX rclk what to do */
- id = &cid;
+ pci_get_progif(dev) <= PCIP_SIMPLECOMM_UART_16550A) {
sc->sc_class = &uart_ns8250_class;
- goto match;
+ return (BUS_PROBE_GENERIC);
}
/* Add checks for non-ns8250 IDs here. */
return (ENXIO);
+}
+
+static int
+uart_pci_attach(device_t dev)
+{
+ static const struct pci_id cid = {
+ .regshft = 0,
+ .rclk = 0,
+ .rid = 0x10 | PCI_NO_MSI,
+ .desc = "Generic SimpleComm PCI device",
+ };
+ struct uart_softc *sc;
+ const struct pci_id *id = uart_pci_match(dev, pci_ns8250_ids);
+ int count, result;
+
+ if (id == NULL)
+ /* No specific PCI ID match, must be a generic device. */
+ id = &cid;
+
+ sc = device_get_softc(dev);
- match:
result = uart_bus_probe(dev, id->regshft, 0, id->rclk,
id->rid & PCI_RID_MASK, 0, 0);
/* Bail out on error. */
@@ -322,25 +332,13 @@ uart_pci_probe(device_t dev)
/* Set/override the device description. */
if (id->desc)
device_set_desc(dev, id->desc);
- return (result);
-}
-
-static int
-uart_pci_attach(device_t dev)
-{
- struct uart_softc *sc;
- const struct pci_id *id;
- int count;
-
- sc = device_get_softc(dev);
/*
- * Use MSI in preference to legacy IRQ if available. However, experience
- * suggests this is only reliable when one MSI vector is advertised.
+ * Use MSI in preference to legacy IRQ if available. However,
+ * experience suggests this is only reliable when one MSI vector is
+ * advertised.
*/
- id = uart_pci_match(dev, pci_ns8250_ids);
- if ((id == NULL || (id->rid & PCI_NO_MSI) == 0) &&
- pci_msi_count(dev) == 1) {
+ if ((id->rid & PCI_NO_MSI) == 0 && pci_msi_count(dev) == 1) {
count = 1;
if (pci_alloc_msi(dev, &count) == 0) {
sc->sc_irid = 1;
diff --git a/sys/dev/uart/uart_dev_ns8250.c b/sys/dev/uart/uart_dev_ns8250.c
index b0c7cd4b44e1..c13eabe9055e 100644
--- a/sys/dev/uart/uart_dev_ns8250.c
+++ b/sys/dev/uart/uart_dev_ns8250.c
@@ -999,6 +999,15 @@ ns8250_bus_probe(struct uart_softc *sc)
uart_setreg(bas, REG_IER, ier);
uart_setreg(bas, REG_MCR, mcr);
uart_setreg(bas, REG_FCR, 0);
+ /*
+ * The Alder Lake AMT SOL Redirection device will never
+ * set LSR_OE (when in loopback mode at least) and
+ * instead block further input by not setting LSR_TEMT.
+ * Recovering the device afterwards into a working
+ * state requires re-writing the LCR register. This
+ * should be harmless on all other devices.
+ */
+ uart_setreg(bas, REG_LCR, uart_getreg(bas, REG_LCR));
uart_barrier(bas);
count = 0;
goto describe;
diff --git a/sys/dev/ufshci/ufshci_acpi.c b/sys/dev/ufshci/ufshci_acpi.c
new file mode 100644
index 000000000000..94da0d3cb411
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_acpi.c
@@ -0,0 +1,248 @@
+/*-
+ * Copyright (c) 2026, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/buf.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/proc.h>
+#include <sys/smp.h>
+
+#include <vm/vm.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+
+#include <dev/acpica/acpivar.h>
+
+#include "ufshci_private.h"
+
+static int ufshci_acpi_probe(device_t);
+static int ufshci_acpi_attach(device_t);
+static int ufshci_acpi_detach(device_t);
+static int ufshci_acpi_suspend(device_t);
+static int ufshci_acpi_resume(device_t);
+
+static device_method_t ufshci_acpi_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, ufshci_acpi_probe),
+ DEVMETHOD(device_attach, ufshci_acpi_attach),
+ DEVMETHOD(device_detach, ufshci_acpi_detach),
+ DEVMETHOD(device_suspend, ufshci_acpi_suspend),
+	DEVMETHOD(device_resume, ufshci_acpi_resume), DEVMETHOD_END
+};
+
+static driver_t ufshci_acpi_driver = {
+ "ufshci",
+ ufshci_acpi_methods,
+ sizeof(struct ufshci_controller),
+};
+
+DRIVER_MODULE(ufshci, acpi, ufshci_acpi_driver, 0, 0);
+MODULE_DEPEND(ufshci, acpi, 1, 1, 1);
+
+static struct ufshci_acpi_device {
+ const char *hid;
+ const char *desc;
+ uint32_t ref_clk;
+ uint32_t quirks;
+} ufshci_acpi_devices[] = {
+ { "QCOM24A5", "Qualcomm Snapdragon X Elite UFS Host Controller",
+ UFSHCI_REF_CLK_19_2MHz,
+ UFSHCI_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH |
+ UFSHCI_QUIRK_BROKEN_LSDBS_MCQS_CAP },
+	{ NULL, NULL, 0, 0 }
+};
+
+static char *ufshci_acpi_ids[] = { "QCOM24A5", NULL };
+
+static const struct ufshci_acpi_device *
+ufshci_acpi_find_device(device_t dev)
+{
+ char *hid;
+ int i;
+ int rv;
+
+ rv = ACPI_ID_PROBE(device_get_parent(dev), dev, ufshci_acpi_ids, &hid);
+ if (rv > 0)
+ return (NULL);
+
+ for (i = 0; ufshci_acpi_devices[i].hid != NULL; i++) {
+ if (strcmp(ufshci_acpi_devices[i].hid, hid) != 0)
+ continue;
+ return (&ufshci_acpi_devices[i]);
+ }
+
+ return (NULL);
+}
+
+static int
+ufshci_acpi_probe(device_t dev)
+{
+ struct ufshci_controller *ctrlr = device_get_softc(dev);
+ const struct ufshci_acpi_device *acpi_dev;
+
+ acpi_dev = ufshci_acpi_find_device(dev);
+ if (acpi_dev == NULL)
+ return (ENXIO);
+
+ if (acpi_dev->hid) {
+ ctrlr->quirks = acpi_dev->quirks;
+ ctrlr->ref_clk = acpi_dev->ref_clk;
+ }
+
+ if (acpi_dev->desc) {
+ device_set_desc(dev, acpi_dev->desc);
+ return (BUS_PROBE_DEFAULT);
+ }
+
+ return (ENXIO);
+}
+
+static int
+ufshci_acpi_allocate_memory(struct ufshci_controller *ctrlr)
+{
+ ctrlr->resource_id = 0;
+ ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
+ &ctrlr->resource_id, RF_ACTIVE);
+
+ if (ctrlr->resource == NULL) {
+ ufshci_printf(ctrlr, "unable to allocate acpi resource\n");
+ return (ENOMEM);
+ }
+
+ ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
+ ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
+ ctrlr->regs = (struct ufshci_registers *)ctrlr->bus_handle;
+
+ return (0);
+}
+
+static int
+ufshci_acpi_setup_shared(struct ufshci_controller *ctrlr)
+{
+ int error;
+
+ ctrlr->num_io_queues = 1;
+ ctrlr->rid = 0;
+ ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
+ &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);
+ if (ctrlr->res == NULL) {
+ ufshci_printf(ctrlr, "unable to allocate shared interrupt\n");
+ return (ENOMEM);
+ }
+
+ error = bus_setup_intr(ctrlr->dev, ctrlr->res,
+ INTR_TYPE_MISC | INTR_MPSAFE, NULL, ufshci_ctrlr_shared_handler,
+ ctrlr, &ctrlr->tag);
+ if (error) {
+ ufshci_printf(ctrlr, "unable to setup shared interrupt\n");
+ return (error);
+ }
+
+ return (0);
+}
+
+static int
+ufshci_acpi_setup_interrupts(struct ufshci_controller *ctrlr)
+{
+ int num_io_queues, per_cpu_io_queues, min_cpus_per_ioq;
+
+ /*
+	 * TODO: Need to implement MCQ (Multi-Circular Queue)
+ * Example: num_io_queues = mp_ncpus;
+ */
+ num_io_queues = 1;
+ TUNABLE_INT_FETCH("hw.ufshci.num_io_queues", &num_io_queues);
+ if (num_io_queues < 1 || num_io_queues > mp_ncpus)
+ num_io_queues = mp_ncpus;
+
+ per_cpu_io_queues = 1;
+ TUNABLE_INT_FETCH("hw.ufshci.per_cpu_io_queues", &per_cpu_io_queues);
+ if (per_cpu_io_queues == 0)
+ num_io_queues = 1;
+
+ min_cpus_per_ioq = smp_threads_per_core;
+ TUNABLE_INT_FETCH("hw.ufshci.min_cpus_per_ioq", &min_cpus_per_ioq);
+ if (min_cpus_per_ioq > 1) {
+ num_io_queues = min(num_io_queues,
+ max(1, mp_ncpus / min_cpus_per_ioq));
+ }
+
+ if (num_io_queues > vm_ndomains)
+ num_io_queues -= num_io_queues % vm_ndomains;
+
+ ctrlr->num_io_queues = num_io_queues;
+ return (ufshci_acpi_setup_shared(ctrlr));
+}
+
+static int
+ufshci_acpi_attach(device_t dev)
+{
+ struct ufshci_controller *ctrlr = device_get_softc(dev);
+ int status;
+
+ ctrlr->dev = dev;
+ status = ufshci_acpi_allocate_memory(ctrlr);
+ if (status != 0)
+ goto bad;
+
+ status = ufshci_acpi_setup_interrupts(ctrlr);
+ if (status != 0)
+ goto bad;
+
+ return (ufshci_attach(dev));
+bad:
+ if (ctrlr->resource != NULL) {
+ bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id,
+ ctrlr->resource);
+ }
+
+ if (ctrlr->tag)
+ bus_teardown_intr(dev, ctrlr->res, ctrlr->tag);
+
+ if (ctrlr->res)
+ bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(ctrlr->res),
+ ctrlr->res);
+
+ return (status);
+}
+
+static int
+ufshci_acpi_detach(device_t dev)
+{
+ return (ufshci_detach(dev));
+}
+
+static int
+ufshci_acpi_suspend(device_t dev)
+{
+ struct ufshci_controller *ctrlr = device_get_softc(dev);
+ int error;
+
+ error = bus_generic_suspend(dev);
+ if (error)
+ return (error);
+
+	/* Currently, ACPI-based ufshci only supports POWER_STYPE_STANDBY */
+ error = ufshci_ctrlr_suspend(ctrlr, POWER_STYPE_STANDBY);
+ return (error);
+}
+
+static int
+ufshci_acpi_resume(device_t dev)
+{
+ struct ufshci_controller *ctrlr = device_get_softc(dev);
+ int error;
+
+ error = ufshci_ctrlr_resume(ctrlr, POWER_STYPE_AWAKE);
+ if (error)
+ return (error);
+
+ error = bus_generic_resume(dev);
+ return (error);
+}
diff --git a/sys/dev/ufshci/ufshci_ctrlr.c b/sys/dev/ufshci/ufshci_ctrlr.c
index f011d03189e0..244aa723d02a 100644
--- a/sys/dev/ufshci/ufshci_ctrlr.c
+++ b/sys/dev/ufshci/ufshci_ctrlr.c
@@ -21,6 +21,50 @@ ufshci_ctrlr_fail(struct ufshci_controller *ctrlr)
ufshci_req_queue_fail(ctrlr, &ctrlr->transfer_req_queue);
}
+/* Some controllers require a reinit after switching to the max gear. */
+static int
+ufshci_ctrlr_reinit_after_max_gear_switch(struct ufshci_controller *ctrlr)
+{
+ int error;
+
+ /* Reset device */
+ ufshci_utmr_req_queue_disable(ctrlr);
+ ufshci_utr_req_queue_disable(ctrlr);
+
+ error = ufshci_ctrlr_disable(ctrlr);
+ if (error != 0)
+ return (error);
+
+ error = ufshci_ctrlr_enable(ctrlr);
+ if (error != 0)
+ return (error);
+
+ error = ufshci_utmr_req_queue_enable(ctrlr);
+ if (error != 0)
+ return (error);
+
+ error = ufshci_utr_req_queue_enable(ctrlr);
+ if (error != 0)
+ return (error);
+
+ error = ufshci_ctrlr_send_nop(ctrlr);
+ if (error != 0)
+ return (error);
+
+ /* Reinit the target device. */
+ error = ufshci_dev_init(ctrlr);
+ if (error != 0)
+ return (error);
+
+ /* Initialize Reference Clock */
+ error = ufshci_dev_init_reference_clock(ctrlr);
+ if (error != 0)
+ return (error);
+
+ /* Initialize unipro */
+ return (ufshci_dev_init_unipro(ctrlr));
+}
+
static void
ufshci_ctrlr_start(struct ufshci_controller *ctrlr, bool resetting)
{
@@ -77,6 +121,12 @@ ufshci_ctrlr_start(struct ufshci_controller *ctrlr, bool resetting)
ufshci_dev_init_uic_link_state(ctrlr);
+ if ((ctrlr->quirks & UFSHCI_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) &&
+ ufshci_ctrlr_reinit_after_max_gear_switch(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
/* Read Controller Descriptor (Device, Geometry) */
if (ufshci_dev_get_descriptor(ctrlr) != 0) {
ufshci_ctrlr_fail(ctrlr);
@@ -199,7 +249,7 @@ ufshci_ctrlr_disable(struct ufshci_controller *ctrlr)
return (error);
}
-static int
+int
ufshci_ctrlr_enable(struct ufshci_controller *ctrlr)
{
uint32_t ie, hcs;
@@ -302,15 +352,18 @@ ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
/* Read Device Capabilities */
ctrlr->cap = cap = ufshci_mmio_read_4(ctrlr, cap);
- ctrlr->is_single_db_supported = UFSHCIV(UFSHCI_CAP_REG_LSDBS, cap);
- /*
- * TODO: This driver does not yet support multi-queue.
- * Check the UFSHCI_CAP_REG_MCQS bit in the future to determine if
- * multi-queue support is available.
- */
- ctrlr->is_mcq_supported = false;
- if (!(ctrlr->is_single_db_supported == 0 || ctrlr->is_mcq_supported))
+ if (ctrlr->quirks & UFSHCI_QUIRK_BROKEN_LSDBS_MCQS_CAP) {
+ ctrlr->is_single_db_supported = true;
+ ctrlr->is_mcq_supported = true;
+ } else {
+ ctrlr->is_single_db_supported = (UFSHCIV(UFSHCI_CAP_REG_LSDBS,
+ cap) == 0);
+ ctrlr->is_mcq_supported = (UFSHCIV(UFSHCI_CAP_REG_MCQS, cap) ==
+ 1);
+ }
+ if (!(ctrlr->is_single_db_supported || ctrlr->is_mcq_supported))
return (ENXIO);
+
/*
* The maximum transfer size supported by UFSHCI spec is 65535 * 256 KiB
* However, we limit the maximum transfer size to 1MiB(256 * 4KiB) for
diff --git a/sys/dev/ufshci/ufshci_dev.c b/sys/dev/ufshci/ufshci_dev.c
index 3167945b53b6..0fedbca9a90e 100644
--- a/sys/dev/ufshci/ufshci_dev.c
+++ b/sys/dev/ufshci/ufshci_dev.c
@@ -325,7 +325,7 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
*/
const uint32_t fast_mode = 1;
const uint32_t rx_bit_shift = 4;
- uint32_t power_mode, peer_granularity;
+ uint32_t peer_granularity;
/* Update lanes with available TX/RX lanes */
if (ufshci_uic_send_dme_get(ctrlr, PA_AvailTxDataLanes,
@@ -352,9 +352,11 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
if (ctrlr->quirks & UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY) {
/* Before changing gears, first change the number of lanes. */
- if (ufshci_uic_send_dme_get(ctrlr, PA_PWRMode, &power_mode))
+ if (ufshci_uic_send_dme_get(ctrlr, PA_PWRMode,
+ &ctrlr->tx_rx_power_mode))
return (ENXIO);
- if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode))
+ if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode,
+ ctrlr->tx_rx_power_mode))
return (ENXIO);
/* Wait for power mode changed. */
@@ -415,8 +417,8 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
return (ENXIO);
/* Set TX/RX PWRMode */
- power_mode = (fast_mode << rx_bit_shift) | fast_mode;
- if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode))
+ ctrlr->tx_rx_power_mode = (fast_mode << rx_bit_shift) | fast_mode;
+ if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, ctrlr->tx_rx_power_mode))
return (ENXIO);
/* Wait for power mode changed. */
diff --git a/sys/dev/ufshci/ufshci_pci.c b/sys/dev/ufshci/ufshci_pci.c
index 606f2a095576..b6b8124bc3a6 100644
--- a/sys/dev/ufshci/ufshci_pci.c
+++ b/sys/dev/ufshci/ufshci_pci.c
@@ -34,8 +34,7 @@ static device_method_t ufshci_pci_methods[] = {
DEVMETHOD(device_attach, ufshci_pci_attach),
DEVMETHOD(device_detach, ufshci_pci_detach),
DEVMETHOD(device_suspend, ufshci_pci_suspend),
- DEVMETHOD(device_resume, ufshci_pci_resume),
- DEVMETHOD_END
+ DEVMETHOD(device_resume, ufshci_pci_resume), DEVMETHOD_END
};
static driver_t ufshci_pci_driver = {
diff --git a/sys/dev/ufshci/ufshci_private.h b/sys/dev/ufshci/ufshci_private.h
index 1634cf51c9fb..067b51a419e8 100644
--- a/sys/dev/ufshci/ufshci_private.h
+++ b/sys/dev/ufshci/ufshci_private.h
@@ -315,10 +315,15 @@ struct ufshci_controller {
#define UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK \
16 /* QEMU does not support Task Management Request */
#define UFSHCI_QUIRK_SKIP_WELL_KNOWN_LUNS \
- 32 /* QEMU does not support Well known logical units*/
+ 32 /* QEMU does not support Well known logical units */
#define UFSHCI_QUIRK_BROKEN_AUTO_HIBERNATE \
64 /* Some controllers have the Auto hibernate feature enabled but it \
does not work. */
+#define UFSHCI_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH \
+ 128 /* Some controllers need to reinit the device after gear switch. \
+ */
+#define UFSHCI_QUIRK_BROKEN_LSDBS_MCQS_CAP \
+ 256 /* Some controllers have their LSDB and MCQS fields reset to 0. */
uint32_t ref_clk;
@@ -391,12 +396,13 @@ struct ufshci_controller {
/* UFS Transport Protocol Layer (UTP) */
struct ufshci_req_queue task_mgmt_req_queue;
struct ufshci_req_queue transfer_req_queue;
- bool is_single_db_supported; /* 0 = supported */
- bool is_mcq_supported; /* 1 = supported */
+ bool is_single_db_supported;
+ bool is_mcq_supported;
/* UFS Interconnect Layer (UIC) */
struct mtx uic_cmd_lock;
- uint8_t hs_gear;
+ uint32_t tx_rx_power_mode;
+ uint32_t hs_gear;
uint32_t tx_lanes;
uint32_t rx_lanes;
uint32_t max_rx_hs_gear;
@@ -442,6 +448,7 @@ int ufshci_ctrlr_suspend(struct ufshci_controller *ctrlr,
int ufshci_ctrlr_resume(struct ufshci_controller *ctrlr,
enum power_stype stype);
int ufshci_ctrlr_disable(struct ufshci_controller *ctrlr);
+int ufshci_ctrlr_enable(struct ufshci_controller *ctrlr);
/* ctrlr defined as void * to allow use with config_intrhook. */
void ufshci_ctrlr_start_config_hook(void *arg);
void ufshci_ctrlr_poll(struct ufshci_controller *ctrlr);
diff --git a/sys/dev/ufshci/ufshci_req_sdb.c b/sys/dev/ufshci/ufshci_req_sdb.c
index ca47aa159c5b..54542f48b32c 100644
--- a/sys/dev/ufshci/ufshci_req_sdb.c
+++ b/sys/dev/ufshci/ufshci_req_sdb.c
@@ -374,34 +374,63 @@ ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue)
{
struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+ int error = 0;
+
+ mtx_lock(&hwq->recovery_lock);
+ mtx_lock(&hwq->qlock);
if (req_queue->is_task_mgmt) {
uint32_t hcs, utmrldbr, utmrlrsr;
+ uint32_t utmrlba, utmrlbau;
+
+ /*
+ * Some controllers require re-enabling. When a controller is
+ * re-enabled, the utmrlba registers are initialized, and these
+ * must be reconfigured upon re-enabling.
+ */
+ utmrlba = hwq->req_queue_addr & 0xffffffff;
+ utmrlbau = hwq->req_queue_addr >> 32;
+ ufshci_mmio_write_4(ctrlr, utmrlba, utmrlba);
+ ufshci_mmio_write_4(ctrlr, utmrlbau, utmrlbau);
hcs = ufshci_mmio_read_4(ctrlr, hcs);
if (!(hcs & UFSHCIM(UFSHCI_HCS_REG_UTMRLRDY))) {
ufshci_printf(ctrlr,
"UTP task management request list is not ready\n");
- return (ENXIO);
+ error = ENXIO;
+ goto out;
}
utmrldbr = ufshci_mmio_read_4(ctrlr, utmrldbr);
if (utmrldbr != 0) {
ufshci_printf(ctrlr,
"UTP task management request list door bell is not ready\n");
- return (ENXIO);
+ error = ENXIO;
+ goto out;
}
utmrlrsr = UFSHCIM(UFSHCI_UTMRLRSR_REG_UTMRLRSR);
ufshci_mmio_write_4(ctrlr, utmrlrsr, utmrlrsr);
} else {
uint32_t hcs, utrldbr, utrlcnr, utrlrsr;
+ uint32_t utrlba, utrlbau;
+
+ /*
+ * Some controllers require re-enabling. When a controller is
+ * re-enabled, the utrlba registers are initialized, and these
+ * must be reconfigured upon re-enabling.
+ */
+ utrlba = hwq->req_queue_addr & 0xffffffff;
+ utrlbau = hwq->req_queue_addr >> 32;
+ ufshci_mmio_write_4(ctrlr, utrlba, utrlba);
+ ufshci_mmio_write_4(ctrlr, utrlbau, utrlbau);
hcs = ufshci_mmio_read_4(ctrlr, hcs);
if (!(hcs & UFSHCIM(UFSHCI_HCS_REG_UTRLRDY))) {
ufshci_printf(ctrlr,
"UTP transfer request list is not ready\n");
- return (ENXIO);
+ error = ENXIO;
+ goto out;
}
utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
@@ -434,7 +463,10 @@ ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
hwq->recovery_state = RECOVERY_NONE;
- return (0);
+out:
+ mtx_unlock(&hwq->qlock);
+ mtx_unlock(&hwq->recovery_lock);
+ return (error);
}
int
diff --git a/sys/dev/ufshci/ufshci_sysctl.c b/sys/dev/ufshci/ufshci_sysctl.c
index 495f087f3c50..a113e951798e 100644
--- a/sys/dev/ufshci/ufshci_sysctl.c
+++ b/sys/dev/ufshci/ufshci_sysctl.c
@@ -193,7 +193,7 @@ ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr)
CTLFLAG_RD, &ctrlr->num_io_queues, 0, "Number of I/O queue pairs");
SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "cap", CTLFLAG_RD,
- &ctrlr->cap, 0, "Number of I/O queue pairs");
+ &ctrlr->cap, 0, "Host controller capabilities register value");
SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_enabled",
CTLFLAG_RD, &dev->is_wb_enabled, 0, "WriteBooster enable/disable");
@@ -214,10 +214,6 @@ ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr)
&dev->wb_user_space_config_option, 0,
"WriteBooster preserve user space mode");
- SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, "power_mode_supported",
- CTLFLAG_RD, &dev->power_mode_supported, 0,
- "Device power mode support");
-
SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO,
"auto_hibernation_supported", CTLFLAG_RD,
&dev->auto_hibernation_supported, 0,
@@ -229,9 +225,38 @@ ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr)
ufshci_sysctl_ahit, "IU",
"Auto-Hibernate Idle Timer Value (in microseconds)");
+ SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, "power_mode_supported",
+ CTLFLAG_RD, &dev->power_mode_supported, 0,
+ "Device power mode support");
+
SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "power_mode",
CTLFLAG_RD, &dev->power_mode, 0, "Current device power mode");
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "tx_rx_power_mode",
+ CTLFLAG_RD, &ctrlr->tx_rx_power_mode, 0,
+ "Current TX/RX PA_PWRMode value");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "max_tx_lanes",
+ CTLFLAG_RD, &ctrlr->max_tx_lanes, 0,
+ "Maximum available TX data lanes");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "max_rx_lanes",
+ CTLFLAG_RD, &ctrlr->max_rx_lanes, 0,
+ "Maximum available RX data lanes");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "tx_lanes", CTLFLAG_RD,
+ &ctrlr->tx_lanes, 0, "Active TX data lanes");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "rx_lanes", CTLFLAG_RD,
+ &ctrlr->rx_lanes, 0, "Active RX data lanes");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "max_rx_hs_gear",
+ CTLFLAG_RD, &ctrlr->max_rx_hs_gear, 0,
+ "Maximum available RX HS gear");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "hs_gear", CTLFLAG_RD,
+ &ctrlr->hs_gear, 0, "Active HS gear");
+
SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "timeout_period",
CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, &ctrlr->timeout_period,
0, ufshci_sysctl_timeout_period, "IU",
diff --git a/sys/dev/usb/input/ukbd.c b/sys/dev/usb/input/ukbd.c
index 57e9beac34b6..7a33a9ad2efe 100644
--- a/sys/dev/usb/input/ukbd.c
+++ b/sys/dev/usb/input/ukbd.c
@@ -95,18 +95,23 @@
#ifdef USB_DEBUG
static int ukbd_debug = 0;
+#endif
static int ukbd_no_leds = 0;
static int ukbd_pollrate = 0;
+static int ukbd_apple_fn_mode = 0;
static SYSCTL_NODE(_hw_usb, OID_AUTO, ukbd, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"USB keyboard");
+#ifdef USB_DEBUG
SYSCTL_INT(_hw_usb_ukbd, OID_AUTO, debug, CTLFLAG_RWTUN,
&ukbd_debug, 0, "Debug level");
+#endif
SYSCTL_INT(_hw_usb_ukbd, OID_AUTO, no_leds, CTLFLAG_RWTUN,
&ukbd_no_leds, 0, "Disables setting of keyboard leds");
SYSCTL_INT(_hw_usb_ukbd, OID_AUTO, pollrate, CTLFLAG_RWTUN,
&ukbd_pollrate, 0, "Force this polling rate, 1-1000Hz");
-#endif
+SYSCTL_INT(_hw_usb_ukbd, OID_AUTO, apple_fn_mode, CTLFLAG_RWTUN,
+    &ukbd_apple_fn_mode, 0, "0 = Fn + F1..F12 -> media, 1 = F1..F12 -> media");
#define UKBD_EMULATE_ATSCANCODE 1
#define UKBD_DRIVER_NAME "ukbd"
@@ -123,6 +128,10 @@ SYSCTL_INT(_hw_usb_ukbd, OID_AUTO, pollrate, CTLFLAG_RWTUN,
#define MOD_EJECT 0x01
#define MOD_FN 0x02
+/* check evdev_usb_scancodes[] for names */
+#define APPLE_FN_KEY 0xff
+#define APPLE_EJECT_KEY 0xec
+
struct ukbd_data {
uint64_t bitmap[howmany(UKBD_NKEYCODE, 64)];
};
@@ -198,6 +207,7 @@ struct ukbd_softc {
uint16_t sc_inputs;
uint16_t sc_inputhead;
uint16_t sc_inputtail;
+ uint16_t sc_vendor_id;
uint8_t sc_leds; /* store for async led requests */
uint8_t sc_iface_index;
@@ -282,9 +292,9 @@ static const uint8_t ukbd_trtab[256] = {
NN, NN, NN, NN, NN, NN, NN, NN, /* D0 - D7 */
NN, NN, NN, NN, NN, NN, NN, NN, /* D8 - DF */
29, 42, 56, 105, 90, 54, 93, 106, /* E0 - E7 */
- NN, NN, NN, NN, NN, NN, NN, NN, /* E8 - EF */
+ NN, NN, NN, NN, 254, NN, NN, NN, /* E8 - EF */
NN, NN, NN, NN, NN, NN, NN, NN, /* F0 - F7 */
- NN, NN, NN, NN, NN, NN, NN, NN, /* F8 - FF */
+ NN, NN, NN, NN, NN, NN, NN, 255, /* F8 - FF */
};
static const uint8_t ukbd_boot_desc[] = {
@@ -582,14 +592,14 @@ ukbd_interrupt(struct ukbd_softc *sc)
sc->sc_repeat_key = 0;
} else {
ukbd_put_key(sc, key | KEY_PRESS);
-
- sc->sc_co_basetime = sbinuptime();
- sc->sc_delay = sc->sc_kbd.kb_delay1;
- ukbd_start_timer(sc);
-
- /* set repeat time for last key */
- sc->sc_repeat_time = now + sc->sc_kbd.kb_delay1;
- sc->sc_repeat_key = key;
+ if (key != APPLE_FN_KEY) {
+ sc->sc_co_basetime = sbinuptime();
+ sc->sc_delay = sc->sc_kbd.kb_delay1;
+ ukbd_start_timer(sc);
+ /* set repeat time for last key */
+ sc->sc_repeat_time = now + sc->sc_kbd.kb_delay1;
+ sc->sc_repeat_key = key;
+ }
}
}
}
@@ -669,6 +679,16 @@ static uint32_t
ukbd_apple_fn(uint32_t keycode)
{
switch (keycode) {
+ case 0x0b: return 0x50; /* H -> LEFT ARROW */
+ case 0x0d: return 0x51; /* J -> DOWN ARROW */
+ case 0x0e: return 0x52; /* K -> UP ARROW */
+ case 0x0f: return 0x4f; /* L -> RIGHT ARROW */
+ case 0x36: return 0x4a; /* COMMA -> HOME */
+ case 0x37: return 0x4d; /* DOT -> END */
+ case 0x18: return 0x4b; /* U -> PGUP */
+ case 0x07: return 0x4e; /* D -> PGDN */
+ case 0x16: return 0x47; /* S -> SCROLLLOCK */
+ case 0x13: return 0x46; /* P -> SYSRQ/PRTSC */
case 0x28: return 0x49; /* RETURN -> INSERT */
case 0x2a: return 0x4c; /* BACKSPACE -> DEL */
case 0x50: return 0x4a; /* LEFT ARROW -> HOME */
@@ -679,6 +699,27 @@ ukbd_apple_fn(uint32_t keycode)
}
}
+/* Kept separate so the sysctl toggle only remaps the Fn-layer keys. */
+static uint32_t
+ukbd_apple_fn_media(uint32_t keycode)
+{
+ switch (keycode) {
+ case 0x3a: return 0xc0; /* F1 -> BRIGHTNESS DOWN */
+ case 0x3b: return 0xc1; /* F2 -> BRIGHTNESS UP */
+ case 0x3c: return 0xc2; /* F3 -> SCALE (MISSION CTRL) */
+ case 0x3d: return 0xc3; /* F4 -> DASHBOARD (LAUNCHPAD) */
+ case 0x3e: return 0xc4; /* F5 -> KBD BACKLIGHT DOWN */
+ case 0x3f: return 0xc5; /* F6 -> KBD BACKLIGHT UP */
+ case 0x40: return 0xea; /* F7 -> MEDIA PREV */
+ case 0x41: return 0xe8; /* F8 -> PLAY/PAUSE */
+ case 0x42: return 0xeb; /* F9 -> MEDIA NEXT */
+ case 0x43: return 0xef; /* F10 -> MUTE */
+ case 0x44: return 0xee; /* F11 -> VOLUME DOWN */
+ case 0x45: return 0xed; /* F12 -> VOLUME UP */
+ default: return keycode;
+ }
+}
+
static uint32_t
ukbd_apple_swap(uint32_t keycode)
{
@@ -740,18 +781,34 @@ ukbd_intr_callback(struct usb_xfer *xfer, usb_error_t error)
/* clear modifiers */
modifiers = 0;
- /* scan through HID data */
+ /* scan through HID data and expose magic apple keys */
if ((sc->sc_flags & UKBD_FLAG_APPLE_EJECT) &&
(id == sc->sc_id_apple_eject)) {
- if (hid_get_data(sc->sc_buffer, len, &sc->sc_loc_apple_eject))
+ if (hid_get_data(sc->sc_buffer, len, &sc->sc_loc_apple_eject)) {
+ sc->sc_ndata.bitmap[APPLE_EJECT_KEY / 64] |=
+ 1ULL << (APPLE_EJECT_KEY % 64);
modifiers |= MOD_EJECT;
+ } else {
+ sc->sc_ndata.bitmap[APPLE_EJECT_KEY / 64] &=
+ ~(1ULL << (APPLE_EJECT_KEY % 64));
+ }
}
if ((sc->sc_flags & UKBD_FLAG_APPLE_FN) &&
(id == sc->sc_id_apple_fn)) {
- if (hid_get_data(sc->sc_buffer, len, &sc->sc_loc_apple_fn))
+ if (hid_get_data(sc->sc_buffer, len, &sc->sc_loc_apple_fn)) {
+ sc->sc_ndata.bitmap[APPLE_FN_KEY / 64] |=
+ 1ULL << (APPLE_FN_KEY % 64);
modifiers |= MOD_FN;
+ } else {
+ sc->sc_ndata.bitmap[APPLE_FN_KEY / 64] &=
+ ~(1ULL << (APPLE_FN_KEY % 64));
+ }
}
+ int apply_apple_fn_media = (modifiers & MOD_FN) ? 1 : 0;
+ if (ukbd_apple_fn_mode) /* toggle from sysctl value */
+ apply_apple_fn_media = !apply_apple_fn_media;
+
for (i = 0; i != UKBD_NKEYCODE; i++) {
const uint64_t valid = sc->sc_loc_key_valid[i / 64];
const uint64_t mask = 1ULL << (i % 64);
@@ -780,6 +837,8 @@ ukbd_intr_callback(struct usb_xfer *xfer, usb_error_t error)
}
if (modifiers & MOD_FN)
key = ukbd_apple_fn(key);
+ if (apply_apple_fn_media)
+ key = ukbd_apple_fn_media(key);
if (sc->sc_flags & UKBD_FLAG_APPLE_SWAP)
key = ukbd_apple_swap(key);
if (key == KEY_NONE || key >= UKBD_NKEYCODE)
@@ -792,6 +851,8 @@ ukbd_intr_callback(struct usb_xfer *xfer, usb_error_t error)
if (modifiers & MOD_FN)
key = ukbd_apple_fn(key);
+ if (apply_apple_fn_media)
+ key = ukbd_apple_fn_media(key);
if (sc->sc_flags & UKBD_FLAG_APPLE_SWAP)
key = ukbd_apple_swap(key);
if (key == KEY_NONE || key == KEY_ERROR || key >= UKBD_NKEYCODE)
@@ -1045,21 +1106,37 @@ ukbd_parse_hid(struct ukbd_softc *sc, const uint8_t *ptr, uint32_t len)
hid_input, &sc->sc_kbd_id);
/* investigate if this is an Apple Keyboard */
- if (hid_locate(ptr, len,
- HID_USAGE2(HUP_CONSUMER, HUG_APPLE_EJECT),
- hid_input, 0, &sc->sc_loc_apple_eject, &flags,
- &sc->sc_id_apple_eject)) {
- if (flags & HIO_VARIABLE)
- sc->sc_flags |= UKBD_FLAG_APPLE_EJECT;
- DPRINTFN(1, "Found Apple eject-key\n");
- }
- if (hid_locate(ptr, len,
- HID_USAGE2(0xFFFF, 0x0003),
- hid_input, 0, &sc->sc_loc_apple_fn, &flags,
- &sc->sc_id_apple_fn)) {
- if (flags & HIO_VARIABLE)
- sc->sc_flags |= UKBD_FLAG_APPLE_FN;
- DPRINTFN(1, "Found Apple FN-key\n");
+ if (sc->sc_vendor_id == USB_VENDOR_APPLE) {
+ if (hid_locate(ptr, len,
+ HID_USAGE2(HUP_CONSUMER, HUG_APPLE_EJECT),
+ hid_input, 0, &sc->sc_loc_apple_eject, &flags,
+ &sc->sc_id_apple_eject)) {
+ if (flags & HIO_VARIABLE)
+ sc->sc_flags |= UKBD_FLAG_APPLE_EJECT;
+ DPRINTFN(1, "Found Apple eject-key\n");
+ }
+ /*
+ * Check the same vendor pages that Linux does to find the one
+ * Apple uses for the function key.
+ */
+ static const uint16_t apple_pages[] = {
+ HUP_APPLE, /* HID_UP_CUSTOM in linux */
+ HUP_MICROSOFT, /* HID_UP_MSVENDOR in linux */
+ HUP_HP, /* HID_UP_HPVENDOR2 in linux */
+ 0xFFFF /* original FreeBSD check; candidate for removal */
+ };
+ for (int i = 0; i < (int)nitems(apple_pages); i++) {
+ if (hid_locate(ptr, len,
+ HID_USAGE2(apple_pages[i], 0x0003),
+ hid_input, 0, &sc->sc_loc_apple_fn, &flags,
+ &sc->sc_id_apple_fn)) {
+ if (flags & HIO_VARIABLE)
+ sc->sc_flags |= UKBD_FLAG_APPLE_FN;
+ DPRINTFN(1, "Found Apple FN-key on page 0x%04x\n",
+ apple_pages[i]);
+ break;
+ }
+ }
}
/* figure out event buffer */
@@ -1147,6 +1224,7 @@ ukbd_attach(device_t dev)
sc->sc_udev = uaa->device;
sc->sc_iface = uaa->iface;
+ sc->sc_vendor_id = uaa->info.idVendor;
sc->sc_iface_index = uaa->info.bIfaceIndex;
sc->sc_iface_no = uaa->info.bIfaceNum;
sc->sc_mode = K_XLATE;
diff --git a/sys/dev/usb/input/wsp.c b/sys/dev/usb/input/wsp.c
index f78d64f69c08..2d7e3b796b17 100644
--- a/sys/dev/usb/input/wsp.c
+++ b/sys/dev/usb/input/wsp.c
@@ -231,6 +231,7 @@ enum tp_type {
/* list of device capability bits */
#define HAS_INTEGRATED_BUTTON 1
+#define SUPPORTS_FORCETOUCH 2
/* trackpad finger data block size */
#define FSIZE_TYPE1 (14 * 2)
@@ -285,7 +286,7 @@ struct wsp_tp {
.delta = 0,
},
[TYPE4] = {
- .caps = HAS_INTEGRATED_BUTTON,
+ .caps = HAS_INTEGRATED_BUTTON | SUPPORTS_FORCETOUCH,
.button = BUTTON_TYPE4,
.offset = FINGER_TYPE4,
.fsize = FSIZE_TYPE4,
@@ -896,7 +897,8 @@ wsp_attach(device_t dev)
WSP_SUPPORT_ABS(sc->sc_evdev, ABS_MT_POSITION_X, sc->sc_params->x);
WSP_SUPPORT_ABS(sc->sc_evdev, ABS_MT_POSITION_Y, sc->sc_params->y);
/* finger pressure */
- WSP_SUPPORT_ABS(sc->sc_evdev, ABS_MT_PRESSURE, sc->sc_params->p);
+ if ((sc->sc_params->tp->caps & SUPPORTS_FORCETOUCH) != 0)
+ WSP_SUPPORT_ABS(sc->sc_evdev, ABS_MT_PRESSURE, sc->sc_params->p);
/* finger major/minor axis */
WSP_SUPPORT_ABS(sc->sc_evdev, ABS_MT_TOUCH_MAJOR, sc->sc_params->w);
WSP_SUPPORT_ABS(sc->sc_evdev, ABS_MT_TOUCH_MINOR, sc->sc_params->w);
@@ -1066,6 +1068,10 @@ wsp_intr_callback(struct usb_xfer *xfer, usb_error_t error)
if (evdev_rcpt_mask & EVDEV_RCPT_HW_MOUSE) {
evdev_push_key(sc->sc_evdev, BTN_LEFT, ibt);
evdev_sync(sc->sc_evdev);
+ if ((sc->sc_fflags & FREAD) == 0 ||
+ usb_fifo_put_bytes_max(
+ sc->sc_fifo.fp[USB_FIFO_RX]) == 0)
+ goto tr_setup;
}
#endif
sc->sc_status.flags &= ~MOUSE_POSCHANGED;
@@ -1355,7 +1361,12 @@ wsp_intr_callback(struct usb_xfer *xfer, usb_error_t error)
case USB_ST_SETUP:
tr_setup:
/* check if we can put more data into the FIFO */
- if (usb_fifo_put_bytes_max(
+ if (
+#ifdef EVDEV_SUPPORT
+ ((evdev_rcpt_mask & EVDEV_RCPT_HW_MOUSE) != 0 &&
+ (sc->sc_state & WSP_EVDEV_OPENED) != 0) ||
+#endif
+ usb_fifo_put_bytes_max(
sc->sc_fifo.fp[USB_FIFO_RX]) != 0) {
usbd_xfer_set_frame_len(xfer, 0,
sc->tp_datalen);
diff --git a/sys/dev/usb/net/if_smsc.c b/sys/dev/usb/net/if_smsc.c
index 0ebbf8482446..8e16b8609144 100644
--- a/sys/dev/usb/net/if_smsc.c
+++ b/sys/dev/usb/net/if_smsc.c
@@ -1585,7 +1585,7 @@ smsc_bootargs_get_mac_addr(device_t dev, struct usb_ether *ue)
node = OF_finddevice("/chosen");
if (node == -1)
return (false);
- if (OF_hasprop(node, "bootargs") == 0) {
+ if (!OF_hasprop(node, "bootargs")) {
smsc_dbg_printf((struct smsc_softc *)ue->ue_sc,
"bootargs not found");
return (false);
diff --git a/sys/dev/usb/net/if_ure.c b/sys/dev/usb/net/if_ure.c
index 257051e6e379..aca510b3b863 100644
--- a/sys/dev/usb/net/if_ure.c
+++ b/sys/dev/usb/net/if_ure.c
@@ -128,6 +128,7 @@ static usb_callback_t ure_bulk_write_callback;
static miibus_readreg_t ure_miibus_readreg;
static miibus_writereg_t ure_miibus_writereg;
static miibus_statchg_t ure_miibus_statchg;
+static miibus_linkchg_t ure_miibus_linkchg;
static uether_fn_t ure_attach_post;
static uether_fn_t ure_init;
@@ -184,6 +185,7 @@ static device_method_t ure_methods[] = {
DEVMETHOD(miibus_readreg, ure_miibus_readreg),
DEVMETHOD(miibus_writereg, ure_miibus_writereg),
DEVMETHOD(miibus_statchg, ure_miibus_statchg),
+ DEVMETHOD(miibus_linkchg, ure_miibus_linkchg),
DEVMETHOD_END
};
@@ -443,6 +445,8 @@ ure_miibus_statchg(device_t dev)
struct mii_data *mii;
if_t ifp;
int locked;
+ uint16_t bmsr;
+ bool new_link, old_link;
sc = device_get_softc(dev);
mii = GET_MII(sc);
@@ -455,6 +459,7 @@ ure_miibus_statchg(device_t dev)
(if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
goto done;
+ old_link = (sc->sc_flags & URE_FLAG_LINK) ? true : false;
sc->sc_flags &= ~URE_FLAG_LINK;
if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
(IFM_ACTIVE | IFM_AVALID)) {
@@ -475,14 +480,72 @@ ure_miibus_statchg(device_t dev)
}
}
- /* Lost link, do nothing. */
- if ((sc->sc_flags & URE_FLAG_LINK) == 0)
- goto done;
+ new_link = (sc->sc_flags & URE_FLAG_LINK) ? true : false;
+ if (old_link && !new_link) {
+ /*
+ * MII layer reports link down. Verify by reading
+ * the PHY BMSR register directly. BMSR link status
+ * is latched-low, so read twice: first clears any
+ * stale latch, second gives current state.
+ */
+ (void)ure_ocp_reg_read(sc,
+ URE_OCP_BASE_MII + MII_BMSR * 2);
+ bmsr = ure_ocp_reg_read(sc,
+ URE_OCP_BASE_MII + MII_BMSR * 2);
+
+ if (bmsr & BMSR_LINK) {
+ /*
+ * PHY still has link. This is a spurious
+ * link-down from the MII polling race (see
+ * PR 252165). Restore IFM_ACTIVE so the
+ * subsequent MIIBUS_LINKCHG check in
+ * mii_phy_update sees no change.
+ */
+ device_printf(dev,
+ "spurious link down (PHY link up), overriding\n");
+ sc->sc_flags |= URE_FLAG_LINK;
+ mii->mii_media_status |= IFM_ACTIVE;
+ }
+ }
done:
if (!locked)
URE_UNLOCK(sc);
}
+static void
+ure_miibus_linkchg(device_t dev)
+{
+ struct ure_softc *sc;
+ struct mii_data *mii;
+ int locked;
+ uint16_t bmsr;
+
+ sc = device_get_softc(dev);
+ mii = GET_MII(sc);
+ locked = mtx_owned(&sc->sc_mtx);
+ if (locked == 0)
+ URE_LOCK(sc);
+
+ /*
+ * This is called by the default miibus linkchg handler
+ * before it calls if_link_state_change(). If the PHY
+ * still has link but the MII layer lost IFM_ACTIVE due
+ * to the polling race (see PR 252165), restore it so the
+ * notification goes out as LINK_STATE_UP rather than DOWN.
+ */
+ if (mii != NULL && (mii->mii_media_status & IFM_ACTIVE) == 0) {
+ (void)ure_ocp_reg_read(sc,
+ URE_OCP_BASE_MII + MII_BMSR * 2);
+ bmsr = ure_ocp_reg_read(sc,
+ URE_OCP_BASE_MII + MII_BMSR * 2);
+ if (bmsr & BMSR_LINK)
+ mii->mii_media_status |= IFM_ACTIVE;
+ }
+
+ if (locked == 0)
+ URE_UNLOCK(sc);
+}
+
/*
* Probe for a RTL8152/RTL8153/RTL8156 chip.
*/
diff --git a/sys/dev/usb/serial/uvscom.c b/sys/dev/usb/serial/uvscom.c
index b9add5c1b37b..c5086f7e86cf 100644
--- a/sys/dev/usb/serial/uvscom.c
+++ b/sys/dev/usb/serial/uvscom.c
@@ -551,8 +551,9 @@ uvscom_pre_param(struct ucom_softc *ucom, struct termios *t)
case B38400:
case B57600:
case B115200:
+ break;
default:
- return (EINVAL);
+ return (EINVAL);
}
return (0);
}
diff --git a/sys/dev/usb/usbdevs b/sys/dev/usb/usbdevs
index bb039f59ce19..b0934cd63a92 100644
--- a/sys/dev/usb/usbdevs
+++ b/sys/dev/usb/usbdevs
@@ -786,6 +786,7 @@ vendor PERASO 0x2932 Peraso Technologies, Inc.
vendor PLANEX 0x2c02 Planex Communications
vendor MERCUSYS 0x2c4e Mercusys, Inc.
vendor QUECTEL 0x2c7c Quectel Wireless Solutions
+vendor NUAND 0x2cf0 Nuand LLC
vendor VIDZMEDIA 0x3275 VidzMedia Pte Ltd
vendor LINKINSTRUMENTS 0x3195 Link Instruments Inc.
vendor AEI 0x3334 AEI
@@ -1695,6 +1696,7 @@ product CYBERTAN RT2870 0x1828 RT2870
/* Cypress Semiconductor products */
product CYPRESS MOUSE 0x0001 mouse
product CYPRESS THERMO 0x0002 thermometer
+product CYPRESS FX3 0x00f3 EZ-USB FX3
product CYPRESS WISPY1A 0x0bad MetaGeek Wi-Spy
product CYPRESS KBDHUB 0x0101 Keyboard/Hub
product CYPRESS FMRADIO 0x1002 FM Radio
@@ -3556,6 +3558,11 @@ product NIKON E990 0x0102 Digital Camera E990
product NIKON LS40 0x4000 CoolScan LS40 ED
product NIKON D300 0x041a Digital Camera D300
+/* Nuand LLC products */
+product NUAND BLADERF 0x5246 bladeRF Software Defined Radio
+product NUAND BLADERF_BL 0x5247 bladeRF Bootloader
+product NUAND BLADERF2 0x5250 bladeRF 2.0 Software Defined Radio
+
/* NovaTech Products */
product NOVATECH NV902 0x9020 NovaTech NV-902W
product NOVATECH RT2573 0x9021 RT2573
diff --git a/sys/dev/virtio/block/virtio_blk.c b/sys/dev/virtio/block/virtio_blk.c
index 4cedb9b45a0e..96846eb0529a 100644
--- a/sys/dev/virtio/block/virtio_blk.c
+++ b/sys/dev/virtio/block/virtio_blk.c
@@ -931,10 +931,57 @@ vtblk_hdr_load_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
}
static int
+vtblk_create_request(struct vtblk_softc *sc, struct vtblk_request *req)
+{
+ req->vbr_sc = sc;
+
+ if (bus_dmamap_create(sc->vtblk_dmat, 0, &req->vbr_mapp))
+ goto error_free;
+
+ if (bus_dmamem_alloc(sc->vtblk_hdr_dmat, (void **)&req->vbr_hdr,
+ BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
+ &req->vbr_hdr_mapp))
+ goto error_destroy;
+
+ if (bus_dmamem_alloc(sc->vtblk_ack_dmat, (void **)&req->vbr_ack,
+ BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
+ &req->vbr_ack_mapp))
+ goto error_hdr_free;
+
+ MPASS(sglist_count(req->vbr_hdr, sizeof(*req->vbr_hdr)) == 1);
+ MPASS(sglist_count(req->vbr_ack, sizeof(*req->vbr_ack)) == 1);
+
+ if (bus_dmamap_load(sc->vtblk_hdr_dmat, req->vbr_hdr_mapp,
+ req->vbr_hdr, sizeof(struct virtio_blk_outhdr),
+ vtblk_hdr_load_callback, req, BUS_DMA_NOWAIT))
+ goto error_ack_free;
+
+ if (bus_dmamap_load(sc->vtblk_ack_dmat, req->vbr_ack_mapp,
+ req->vbr_ack, sizeof(uint8_t), vtblk_ack_load_callback,
+ req, BUS_DMA_NOWAIT))
+ goto error_hdr_unload;
+
+ return (0);
+
+error_hdr_unload:
+ bus_dmamap_unload(sc->vtblk_hdr_dmat, req->vbr_hdr_mapp);
+error_ack_free:
+ bus_dmamem_free(sc->vtblk_ack_dmat, req->vbr_ack, req->vbr_ack_mapp);
+error_hdr_free:
+ bus_dmamem_free(sc->vtblk_hdr_dmat, req->vbr_hdr, req->vbr_hdr_mapp);
+error_destroy:
+ bus_dmamap_destroy(sc->vtblk_dmat, req->vbr_mapp);
+error_free:
+
+ return (ENOMEM);
+}
+
+static int
vtblk_request_prealloc(struct vtblk_softc *sc)
{
struct vtblk_request *req;
int i, nreqs;
+ int error;
nreqs = virtqueue_size(sc->vtblk_vq);
@@ -951,52 +998,19 @@ vtblk_request_prealloc(struct vtblk_softc *sc)
if (req == NULL)
return (ENOMEM);
- req->vbr_sc = sc;
-
- if (bus_dmamap_create(sc->vtblk_dmat, 0, &req->vbr_mapp))
- goto error_free;
-
- if (bus_dmamem_alloc(sc->vtblk_hdr_dmat, (void **)&req->vbr_hdr,
- BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
- &req->vbr_hdr_mapp))
- goto error_destroy;
-
- if (bus_dmamem_alloc(sc->vtblk_ack_dmat, (void **)&req->vbr_ack,
- BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
- &req->vbr_ack_mapp))
- goto error_hdr_free;
-
- MPASS(sglist_count(req->vbr_hdr, sizeof(*req->vbr_hdr)) == 1);
- MPASS(sglist_count(req->vbr_ack, sizeof(*req->vbr_ack)) == 1);
-
- if (bus_dmamap_load(sc->vtblk_hdr_dmat, req->vbr_hdr_mapp,
- req->vbr_hdr, sizeof(struct virtio_blk_outhdr),
- vtblk_hdr_load_callback, req, BUS_DMA_NOWAIT))
- goto error_ack_free;
-
- if (bus_dmamap_load(sc->vtblk_ack_dmat, req->vbr_ack_mapp,
- req->vbr_ack, sizeof(uint8_t), vtblk_ack_load_callback,
- req, BUS_DMA_NOWAIT))
- goto error_hdr_unload;
+ error = vtblk_create_request(sc, req);
+ if (error) {
+ free(req, M_DEVBUF);
+ return (error);
+ }
sc->vtblk_request_count++;
vtblk_request_enqueue(sc, req);
}
- return (0);
+ error = vtblk_create_request(sc, &sc->vtblk_dump_request);
-error_hdr_unload:
- bus_dmamap_unload(sc->vtblk_hdr_dmat, req->vbr_hdr_mapp);
-error_ack_free:
- bus_dmamem_free(sc->vtblk_ack_dmat, req->vbr_ack, req->vbr_ack_mapp);
-error_hdr_free:
- bus_dmamem_free(sc->vtblk_hdr_dmat, req->vbr_hdr, req->vbr_hdr_mapp);
-error_destroy:
- bus_dmamap_destroy(sc->vtblk_dmat, req->vbr_mapp);
-error_free:
- free(req, M_DEVBUF);
-
- return (ENOMEM);
+ return (error);
}
static void
diff --git a/sys/dev/virtio/pci/virtio_pci_modern.c b/sys/dev/virtio/pci/virtio_pci_modern.c
index 108fd2b5f8e9..baf7c448bb95 100644
--- a/sys/dev/virtio/pci/virtio_pci_modern.c
+++ b/sys/dev/virtio/pci/virtio_pci_modern.c
@@ -191,7 +191,7 @@ static void vtpci_modern_write_device_8(struct vtpci_modern_softc *,
/* Tunables. */
SYSCTL_DECL(_hw_virtio_pci);
-static int vtpci_modern_transitional = 0;
+static int vtpci_modern_transitional = 1;
SYSCTL_INT(_hw_virtio_pci, OID_AUTO, transitional, CTLFLAG_RDTUN,
&vtpci_modern_transitional, 0,
"If 0, a transitional VirtIO device is used in legacy mode; otherwise, in modern mode.");
diff --git a/sys/dev/virtio/virtqueue.c b/sys/dev/virtio/virtqueue.c
index b7fdb4703ccb..10b5179bd3d5 100644
--- a/sys/dev/virtio/virtqueue.c
+++ b/sys/dev/virtio/virtqueue.c
@@ -341,7 +341,7 @@ virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
align = size;
error = bus_dma_tag_create(
bus_get_dma_tag(dev), /* parent */
- align, /* alignment */
+ roundup_pow_of_two(align), /* alignment */
0, /* boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
diff --git a/sys/dev/vmgenc/vmgenc_acpi.c b/sys/dev/vmgenc/vmgenc_acpi.c
index 18519a8e4f22..59fcbd5346ba 100644
--- a/sys/dev/vmgenc/vmgenc_acpi.c
+++ b/sys/dev/vmgenc/vmgenc_acpi.c
@@ -261,4 +261,4 @@ static driver_t vmgenc_driver = {
DRIVER_MODULE(vmgenc, acpi, vmgenc_driver, NULL, NULL);
MODULE_DEPEND(vmgenc, acpi, 1, 1, 1);
-MODULE_DEPEND(vemgenc, random_harvestq, 1, 1, 1);
+MODULE_DEPEND(vmgenc, random_harvestq, 1, 1, 1);
diff --git a/sys/dev/vmm/vmm_dev.c b/sys/dev/vmm/vmm_dev.c
index ed8e5b2e0777..a2775023838a 100644
--- a/sys/dev/vmm/vmm_dev.c
+++ b/sys/dev/vmm/vmm_dev.c
@@ -114,7 +114,7 @@ static int devmem_create_cdev(struct vmmdev_softc *sc, int id, char *devmem);
static void vmmdev_destroy(struct vmmdev_softc *sc);
static int
-vmm_priv_check(struct ucred *ucred)
+vmm_jail_priv_check(struct ucred *ucred)
{
if (jailed(ucred) &&
(ucred->cr_prison->pr_allow & pr_allow_vmm_flag) == 0)
@@ -371,7 +371,7 @@ vmmdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
* A jail without vmm access shouldn't be able to access vmm device
* files at all, but check here just to be thorough.
*/
- error = vmm_priv_check(td->td_ucred);
+ error = vmm_jail_priv_check(td->td_ucred);
if (error != 0)
return (error);
@@ -940,7 +940,7 @@ sysctl_vmm_destroy(SYSCTL_HANDLER_ARGS)
char *buf;
int error, buflen;
- error = vmm_priv_check(req->td->td_ucred);
+ error = vmm_jail_priv_check(req->td->td_ucred);
if (error)
return (error);
@@ -1016,6 +1016,12 @@ vmmdev_create(const char *name, uint32_t flags, struct ucred *cred)
"An unprivileged user must run VMs in monitor mode"));
}
+ if ((error = vmm_jail_priv_check(cred)) != 0) {
+ sx_xunlock(&vmmdev_mtx);
+ return (EXTERROR(error,
+ "VMs cannot be created in the current jail"));
+ }
+
if (!chgvmmcnt(cred->cr_ruidinfo, 1, vm_maxvmms)) {
sx_xunlock(&vmmdev_mtx);
return (ENOMEM);
@@ -1061,7 +1067,7 @@ sysctl_vmm_create(SYSCTL_HANDLER_ARGS)
if (!vmm_initialized)
return (ENXIO);
- error = vmm_priv_check(req->td->td_ucred);
+ error = vmm_jail_priv_check(req->td->td_ucred);
if (error != 0)
return (error);
@@ -1126,7 +1132,7 @@ vmmctl_open(struct cdev *cdev, int flags, int fmt, struct thread *td)
int error;
struct vmmctl_priv *priv;
- error = vmm_priv_check(td->td_ucred);
+ error = vmm_jail_priv_check(td->td_ucred);
if (error != 0)
return (error);
diff --git a/sys/dev/vt/vt_core.c b/sys/dev/vt/vt_core.c
index b9159a73ad79..0ca4bb8d4d49 100644
--- a/sys/dev/vt/vt_core.c
+++ b/sys/dev/vt/vt_core.c
@@ -1684,8 +1684,13 @@ vtterm_splash(struct vt_device *vd)
uintptr_t image;
vt_axis_t top, left;
- si = MD_FETCH(preload_kmdp, MODINFOMD_SPLASH, struct splash_info *);
if (!(vd->vd_flags & VDF_TEXTMODE) && (boothowto & RB_MUTE)) {
+ if (rebooting == 1) {
+ si = MD_FETCH(preload_kmdp, MODINFOMD_SHTDWNSPLASH, struct splash_info *);
+ vd->vd_driver->vd_blank(vd, TC_BLACK);
+ } else {
+ si = MD_FETCH(preload_kmdp, MODINFOMD_SPLASH, struct splash_info *);
+ }
if (si == NULL) {
top = (vd->vd_height - vt_logo_height) / 2;
left = (vd->vd_width - vt_logo_width) / 2;
@@ -1832,6 +1837,15 @@ vt_init_font_static(void)
vt_font_assigned = font;
}
+#ifdef DEV_SPLASH
+static int
+vt_shutdown_splash(struct vt_window *vw)
+{
+ vtterm_splash(vw->vw_device);
+ return (0);
+}
+#endif
+
static void
vtterm_cnprobe(struct terminal *tm, struct consdev *cp)
{
@@ -3177,6 +3191,10 @@ vt_upgrade(struct vt_device *vd)
/* For existing console window. */
EVENTHANDLER_REGISTER(shutdown_pre_sync,
vt_window_switch, vw, SHUTDOWN_PRI_DEFAULT);
+#ifdef DEV_SPLASH
+ EVENTHANDLER_REGISTER(shutdown_pre_sync,
+ vt_shutdown_splash, vw, SHUTDOWN_PRI_DEFAULT);
+#endif
}
}
}